Diffstat (limited to 'src/ceph-volume/ceph_volume/tests')
-rw-r--r--  src/ceph-volume/ceph_volume/tests/__init__.py | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/api/test_lvm.py | 870
-rw-r--r--  src/ceph-volume/ceph_volume/tests/conftest.py | 295
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/__init__.py | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py | 414
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py | 280
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py | 8
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py | 48
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py | 59
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py | 265
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py | 174
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py | 45
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py | 236
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py | 97
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py | 200
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py | 68
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py | 45
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/test_zap.py | 28
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/.gitignore | 5
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/README.md | 24
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/Vagrantfile | 429
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all | 33
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all | 33
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all | 34
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all | 34
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all | 30
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all | 33
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all | 33
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml | 56
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml | 12
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml | 188
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml | 72
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml | 64
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml | 34
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini | 78
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/.DS_Store | bin 0 -> 6148 bytes
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all | 30
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all | 30
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml | 104
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all | 33
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml | 108
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml | 56
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml | 27
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml | 148
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml | 169
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini | 79
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all | 30
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml | 104
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml | 56
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml | 1
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml | 54
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all | 33
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts | 8
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml | 108
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml | 54
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml | 153
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh | 14
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/scripts/output.py | 5
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh | 35
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh | 21
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh | 12
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all | 19
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml | 15
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all | 19
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml | 29
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml | 73
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini | 66
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all | 19
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all | 19
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml | 15
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml | 73
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py | 103
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py | 60
-rw-r--r--  src/ceph-volume/ceph_volume/tests/systemd/test_main.py | 51
-rw-r--r--  src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py | 21
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_configuration.py | 117
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_decorators.py | 71
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_inventory.py | 110
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_main.py | 69
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_process.py | 92
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_terminal.py | 143
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py | 89
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_device.py | 577
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_disk.py | 540
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_encryption.py | 53
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_prepare.py | 422
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_system.py | 279
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_util.py | 116
304 files changed, 12214 insertions, 0 deletions
diff --git a/src/ceph-volume/ceph_volume/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
new file mode 100644
index 00000000..f01ceb4f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
@@ -0,0 +1,870 @@
+import os
+import pytest
+from mock.mock import patch
+from ceph_volume import process, exceptions
+from ceph_volume.api import lvm as api
+
+
+class TestParseTags(object):
+
+ def test_no_tags_means_empty_dict(self):
+ result = api.parse_tags('')
+ assert result == {}
+
+ def test_single_tag_gets_parsed(self):
+ result = api.parse_tags('ceph.osd_something=1')
+ assert result == {'ceph.osd_something': '1'}
+
+ def test_non_ceph_tags_are_skipped(self):
+ result = api.parse_tags('foo')
+ assert result == {}
+
+ def test_mixed_non_ceph_tags(self):
+ result = api.parse_tags('foo,ceph.bar=1')
+ assert result == {'ceph.bar': '1'}
+
+ def test_multiple_csv_expands_in_dict(self):
+ result = api.parse_tags('ceph.osd_something=1,ceph.foo=2,ceph.fsid=0000')
+ # assert them piecemeal to avoid the un-ordered dict nature
+ assert result['ceph.osd_something'] == '1'
+ assert result['ceph.foo'] == '2'
+ assert result['ceph.fsid'] == '0000'
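+
+ # For reference, a minimal parse_tags() consistent with the tests above
+ # could look like this sketch (an illustration assuming tags are a
+ # comma-separated list of key=value pairs, not necessarily the real
+ # implementation):
+ #
+ #     def parse_tags(lv_tags):
+ #         if not lv_tags:
+ #             return {}
+ #         return dict(t.split('=', 1) for t in lv_tags.split(',') if '=' in t)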
+
+
+class TestVolume(object):
+
+ def test_is_ceph_device(self):
+ lv_tags = "ceph.type=data,ceph.osd_id=0"
+ osd = api.Volume(lv_name='osd/volume', lv_tags=lv_tags)
+ assert api.is_ceph_device(osd)
+
+ @pytest.mark.parametrize('dev',[
+ '/dev/sdb',
+ api.VolumeGroup(vg_name='foo'),
+ api.Volume(lv_name='vg/no_osd', lv_tags='', lv_path='lv/path'),
+ api.Volume(lv_name='vg/no_osd', lv_tags='ceph.osd_id=null', lv_path='lv/path'),
+ None,
+ ])
+ def test_is_not_ceph_device(self, dev):
+ assert not api.is_ceph_device(dev)
+
+ def test_no_empty_lv_name(self):
+ with pytest.raises(ValueError):
+ api.Volume(lv_name='', lv_tags='')
+
+
+class TestVolumeGroup(object):
+
+ def test_volume_group_no_empty_name(self):
+ with pytest.raises(ValueError):
+ api.VolumeGroup(vg_name='')
+
+
+class TestVolumeGroupFree(object):
+
+ def test_integer_gets_produced(self):
+ vg = api.VolumeGroup(vg_name='nosize', vg_free_count=100, vg_extent_size=4194304)
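+ # free space is free extent count times extent size:
+ # 100 * 4194304 bytes (4 MiB) = 419430400 bytes (400 MiB)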
+ assert vg.free == 100 * 4194304
+
+
+class TestCreateLVs(object):
+
+ def setup(self):
+ self.vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_extent_count=99999999,
+ vg_free_count=999)
+
+ def test_creates_correct_lv_number_from_parts(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg, parts=4)
+ assert len(lvs) == 4
+
+ def test_suffixes_the_size_arg(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg, parts=4)
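+ # 999 free extents split into 4 parts: 999 // 4 = 249 extents each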
+ assert lvs[0][1]['extents'] == 249
+
+ def test_only_uses_free_size(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_extent_count=99999999,
+ vg_free_count=1000)
+ lvs = api.create_lvs(vg, parts=4)
+ assert lvs[0][1]['extents'] == 250
+
+ def test_null_tags_are_set_by_default(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ kwargs = api.create_lvs(self.vg, parts=4)[0][1]
+ assert list(kwargs['tags'].values()) == ['null', 'null', 'null', 'null']
+
+ def test_fallback_to_one_part(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm.create_lv', lambda *a, **kw: (a, kw))
+ lvs = api.create_lvs(self.vg)
+ assert len(lvs) == 1
+
+
+class TestVolumeGroupSizing(object):
+
+ def setup(self):
+ self.vg = api.VolumeGroup(vg_name='ceph',
+ vg_extent_size=1073741824,
+ vg_free_count=1024)
+
+ def test_parts_and_size_errors(self):
+ with pytest.raises(ValueError) as error:
+ self.vg.sizing(parts=4, size=10)
+ assert "Cannot process sizing" in str(error.value)
+
+ def test_zero_parts_produces_100_percent(self):
+ result = self.vg.sizing(parts=0)
+ assert result['percentages'] == 100
+
+ def test_two_parts_produces_50_percent(self):
+ result = self.vg.sizing(parts=2)
+ assert result['percentages'] == 50
+
+ def test_two_parts_produces_half_size(self):
+ result = self.vg.sizing(parts=2)
+ assert result['sizes'] == 512
+
+ def test_half_size_produces_round_sizes(self):
+ result = self.vg.sizing(size=512)
+ assert result['sizes'] == 512
+ assert result['percentages'] == 50
+ assert result['parts'] == 2
+
+ def test_bit_more_than_half_size_allocates_full_size(self):
+ # with 1024 free, a size of 513 can't allocate more than one part
+ # (1024 // 513 == 1), so sizing falls back to using the whole device
+ result = self.vg.sizing(size=513)
+ assert result['sizes'] == 1024
+ assert result['percentages'] == 100
+ assert result['parts'] == 1
+
+ def test_extents_are_halved_rounded_down(self):
+ result = self.vg.sizing(size=512)
+ assert result['extents'] == 512
+
+ def test_bit_less_size_rounds_down(self):
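+ # 1024 // 129 allows 7 parts; 1024 // 7 = 146 per part (rounded
+ # down), which is ~14% of the volume group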
+ result = self.vg.sizing(size=129)
+ assert result['sizes'] == 146
+ assert result['percentages'] == 14
+ assert result['parts'] == 7
+
+ def test_unable_to_allocate_past_free_size(self):
+ with pytest.raises(exceptions.SizeAllocationError):
+ self.vg.sizing(size=2048)
+
+
+class TestRemoveLV(object):
+
+ def test_removes_lv(self, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 0)
+ monkeypatch.setattr(process, 'call', mock_call)
+ assert api.remove_lv("vg/lv")
+
+ def test_removes_lv_object(self, fake_call):
+ foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ api.remove_lv(foo_volume)
+ # last argument from the list passed to process.call
+ assert fake_call.calls[0]['args'][0][-1] == '/path'
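+ # (fake_call, fake_run and the capture object used throughout these
+ # tests are fixtures from this package's conftest.py that record each
+ # process invocation in a .calls list)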
+
+ def test_fails_to_remove_lv(self, monkeypatch):
+ def mock_call(cmd, **kw):
+ return ('', '', 1)
+ monkeypatch.setattr(process, 'call', mock_call)
+ with pytest.raises(RuntimeError):
+ api.remove_lv("vg/lv")
+
+
+class TestCreateLV(object):
+
+ def setup(self):
+ self.foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ self.foo_group = api.VolumeGroup(vg_name='foo_group',
+ vg_extent_size="4194304",
+ vg_extent_count="100",
+ vg_free_count="100")
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_uses_size(self, m_get_first_lv, m_call, m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, size=419430400, tags={'ceph.type': 'data'})
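+ # 419430400 bytes / 4194304 bytes per extent = 100 extents, hence -l 100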
+ expected = ['lvcreate', '--yes', '-l', '100', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_uses_size_adjust_if_1percent_over(self, m_get_first_lv, m_call, m_run, monkeypatch):
+ foo_volume = api.Volume(lv_name='foo', lv_path='/path', vg_name='foo_group', lv_tags='')
+ foo_group = api.VolumeGroup(vg_name='foo_group',
+ vg_extent_size="4194304",
+ vg_extent_count="1000",
+ vg_free_count="1000")
+ m_get_first_lv.return_value = foo_volume
+ # 4232052736 (1009 extents) is just under 1% over the available
+ # 4194304000 bytes (1000 extents * 4194304), so it is adjusted down to fit
+ api.create_lv('foo', 0, vg=foo_group, size=4232052736, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '1000', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_uses_size_too_large(self, m_get_first_lv, m_call, m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ with pytest.raises(RuntimeError):
+ api.create_lv('foo', 0, vg=self.foo_group, size=5368709120, tags={'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_uses_extents(self, m_get_first_lv, m_call, m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, extents='50', tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '50', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @pytest.mark.parametrize("test_input,expected",
+ [(2, 50),
+ (3, 33),])
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_uses_slots(self, m_get_first_lv, m_call, m_run, monkeypatch, test_input, expected):
+ m_get_first_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, slots=test_input, tags={'ceph.type': 'data'})
+ # avoid shadowing the parametrized 'expected' argument
+ expected_cmd = ['lvcreate', '--yes', '-l', str(expected), '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected_cmd)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_uses_all(self, m_get_first_lv, m_call, m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
+ expected = ['lvcreate', '--yes', '-l', '100%FREE', '-n', 'foo-0', 'foo_group']
+ m_run.assert_called_with(expected)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_calls_to_set_tags_default(self, m_get_first_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group)
+ tags = {
+ "ceph.osd_id": "null",
+ "ceph.type": "null",
+ "ceph.cluster_fsid": "null",
+ "ceph.osd_fsid": "null",
+ }
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_calls_to_set_tags_arg(self, m_get_first_lv, m_set_tags, m_call, m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ api.create_lv('foo', 0, vg=self.foo_group, tags={'ceph.type': 'data'})
+ tags = {
+ "ceph.type": "data",
+ "ceph.data_device": "/path"
+ }
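+ # create_lv evidently derives the extra ceph.{type}_device tag from
+ # the lv_path of the volume it just created ('/path' in self.foo_volume)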
+ m_set_tags.assert_called_with(tags)
+
+ @patch('ceph_volume.api.lvm.process.run')
+ @patch('ceph_volume.api.lvm.process.call')
+ @patch('ceph_volume.api.lvm.get_device_vgs')
+ @patch('ceph_volume.api.lvm.create_vg')
+ @patch('ceph_volume.api.lvm.get_first_lv')
+ def test_create_vg(self, m_get_first_lv, m_create_vg, m_get_device_vgs, m_call,
+ m_run, monkeypatch):
+ m_get_first_lv.return_value = self.foo_volume
+ m_get_device_vgs.return_value = []
+ api.create_lv('foo', 0, device='dev/foo', size='5G', tags={'ceph.type': 'data'})
+ m_create_vg.assert_called_with('dev/foo', name_prefix='ceph')
+
+
+class TestTags(object):
+
+ def setup(self):
+ self.foo_volume_clean = api.Volume(lv_name='foo_clean', lv_path='/pathclean',
+ vg_name='foo_group',
+ lv_tags='')
+ self.foo_volume = api.Volume(lv_name='foo', lv_path='/path',
+ vg_name='foo_group',
+ lv_tags='ceph.foo0=bar0,ceph.foo1=bar1,ceph.foo2=bar2')
+
+ def test_set_tag(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ self.foo_volume_clean.set_tag('foo', 'bar')
+ expected = ['lvchange', '--addtag', 'foo=bar', '/pathclean']
+ assert capture.calls[0]['args'][0] == expected
+ assert self.foo_volume_clean.tags == {'foo': 'bar'}
+
+ def test_set_clear_tag(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ self.foo_volume_clean.set_tag('foo', 'bar')
+ assert self.foo_volume_clean.tags == {'foo': 'bar'}
+ self.foo_volume_clean.clear_tag('foo')
+ expected = ['lvchange', '--deltag', 'foo=bar', '/pathclean']
+ assert self.foo_volume_clean.tags == {}
+ assert capture.calls[1]['args'][0] == expected
+
+ def test_set_tags(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
+ assert self.foo_volume.tags == tags
+
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'baz1', 'ceph.foo2': 'baz2'}
+ self.foo_volume.set_tags(tags)
+ assert self.foo_volume.tags == tags
+
+ self.foo_volume.set_tag('ceph.foo1', 'other1')
+ tags['ceph.foo1'] = 'other1'
+ assert self.foo_volume.tags == tags
+
+ expected = [
+ sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
+ 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2', '/path']),
+ sorted(['lvchange', '--deltag', 'ceph.foo1=baz1', '/path']),
+ sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
+ 'ceph.foo1=baz1', '--addtag', 'ceph.foo2=baz2', '/path']),
+ sorted(['lvchange', '--addtag', 'ceph.foo1=other1', '/path']),
+ ]
+ # The order isn't guaranteed
+ for call in capture.calls:
+ assert sorted(call['args'][0]) in expected
+ assert len(capture.calls) == len(expected)
+
+ def test_clear_tags(self, monkeypatch, capture):
+ monkeypatch.setattr(process, 'run', capture)
+ monkeypatch.setattr(process, 'call', capture)
+ tags = {'ceph.foo0': 'bar0', 'ceph.foo1': 'bar1', 'ceph.foo2': 'bar2'}
+
+ self.foo_volume_clean.set_tags(tags)
+ assert self.foo_volume_clean.tags == tags
+ self.foo_volume_clean.clear_tags()
+ assert self.foo_volume_clean.tags == {}
+
+ expected = [
+ sorted(['lvchange', '--addtag', 'ceph.foo0=bar0', '--addtag',
+ 'ceph.foo1=bar1', '--addtag', 'ceph.foo2=bar2',
+ '/pathclean']),
+ sorted(['lvchange', '--deltag', 'ceph.foo0=bar0', '--deltag',
+ 'ceph.foo1=bar1', '--deltag', 'ceph.foo2=bar2',
+ '/pathclean']),
+ ]
+ # The order isn't guaranteed
+ for call in capture.calls:
+ assert sorted(call['args'][0]) in expected
+ assert len(capture.calls) == len(expected)
+
+
+class TestExtendVG(object):
+
+ def setup(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_uses_single_device_in_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, ['/dev/sda'])
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_single_device(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, '/dev/sda')
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_multiple_devices(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.extend_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
+ expected = ['vgextend', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
+ assert fake_run.calls[0]['args'][0] == expected
+
+
+class TestReduceVG(object):
+
+ def setup(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_uses_single_device_in_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, ['/dev/sda'])
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_single_device(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, '/dev/sda')
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda']
+ assert fake_run.calls[0]['args'][0] == expected
+
+ def test_uses_multiple_devices(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.reduce_vg(self.foo_volume, ['/dev/sda', '/dev/sdb'])
+ expected = ['vgreduce', '--force', '--yes', 'foo', '/dev/sda', '/dev/sdb']
+ assert fake_run.calls[0]['args'][0] == expected
+
+
+class TestCreateVG(object):
+
+ def setup(self):
+ self.foo_volume = api.VolumeGroup(vg_name='foo', lv_tags='')
+
+ def test_no_name(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.create_vg('/dev/sda')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2].startswith('ceph-')
+
+ def test_devices_list(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.create_vg(['/dev/sda', '/dev/sdb'], name='ceph')
+ result = fake_run.calls[0]['args'][0]
+ expected = ['vgcreate', '--force', '--yes', 'ceph', '/dev/sda', '/dev/sdb']
+ assert result == expected
+
+ def test_name_prefix(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.create_vg('/dev/sda', name_prefix='master')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2].startswith('master-')
+
+ def test_specific_name(self, monkeypatch, fake_run):
+ monkeypatch.setattr(api, 'get_first_vg', lambda **kw: True)
+ api.create_vg('/dev/sda', name='master')
+ result = fake_run.calls[0]['args'][0]
+ assert '/dev/sda' in result
+ assert result[-2] == 'master'
+
+#
+# The following tests are pretty gnarly. VDO detection is very convoluted and
+# involves correlating information from device mappers, realpaths, slaves of
+# those mappers, and parents or related mappers. This makes it very hard to
+# patch nicely or keep tests short and readable. These tests try to ensure
+# correctness; the better approach would be to do some functional testing
+# with VDO.
+#
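+# For orientation, the sysfs layout the detection walks, as reconstructed
+# by the fixtures below (an assumed minimal layout, not exhaustive):
+#
+#     /sys/kvdo                        -> present when the kvdo module is loaded
+#     /sys/block/<dev>/slaves/<dm-X>   -> devices a mapper sits on top of
+#     /dev/mapper/<name>               -> mapper name correlated back to /dev/dm-X
+#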
+
+
+@pytest.fixture
+def disable_kvdo_path(monkeypatch):
+ monkeypatch.setattr('os.path.isdir', lambda x, **kw: False)
+
+
+@pytest.fixture
+def enable_kvdo_path(monkeypatch):
+ monkeypatch.setattr('os.path.isdir', lambda x, **kw: True)
+
+
+# Stub for os.listdir
+
+
+class ListDir(object):
+
+ def __init__(self, paths):
+ self.paths = paths
+ self._normalize_paths()
+ self.listdir = os.listdir
+
+ def _normalize_paths(self):
+ # rebuild the dict rather than mutating it while iterating, which
+ # raises RuntimeError on Python 3 whenever a key actually changes
+ self.paths = {k.rstrip('/'): v.rstrip('/') for k, v in self.paths.items()}
+
+ def add(self, original, fake):
+ self.paths[original.rstrip('/')] = fake.rstrip('/')
+
+ def __call__(self, path):
+ return self.listdir(self.paths[path.rstrip('/')])
+
+
+@pytest.fixture(scope='function')
+def listdir(monkeypatch):
+ def apply(paths=None, stub=None):
+ if not stub:
+ stub = ListDir(paths)
+ if paths:
+ for original, fake in paths.items():
+ stub.add(original, fake)
+
+ monkeypatch.setattr('os.listdir', stub)
+ return apply
+
+
+@pytest.fixture(scope='function')
+def makedirs(tmpdir):
+ def create(directory):
+ path = os.path.join(str(tmpdir), directory)
+ os.makedirs(path)
+ return path
+ create.base = str(tmpdir)
+ return create
+
+
+class TestIsVdo(object):
+
+ def test_no_vdo_dir(self, disable_kvdo_path):
+ assert api._is_vdo('/path') is False
+
+ def test_exceptions_return_false(self, monkeypatch):
+ def throw(*a, **kw):
+ raise Exception()
+ monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', throw)
+ assert api.is_vdo('/path') == '0'
+
+ def test_is_vdo_returns_a_string(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.api.lvm._is_vdo', lambda x, **kw: True)
+ assert api.is_vdo('/path') == '1'
+
+ def test_kvdo_dir_no_devices(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
+ assert api._is_vdo('/dev/mapper/vdo0') is False
+
+ def test_vdo_slaves_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: ['/dev/dm-3'])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: [])
+ assert api._is_vdo('/dev/dm-3') is True
+
+ def test_vdo_parents_found_and_matched(self, makedirs, enable_kvdo_path, listdir, monkeypatch):
+ kvdo_path = makedirs('sys/kvdo')
+ listdir(paths={'/sys/kvdo': kvdo_path})
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_slaves', lambda x, **kw: [])
+ monkeypatch.setattr('ceph_volume.api.lvm._vdo_parents', lambda x, **kw: ['/dev/dm-4'])
+ assert api._is_vdo('/dev/dm-4') is True
+
+
+class TestVdoSlaves(object):
+
+ def test_slaves_are_not_found(self, makedirs, listdir, monkeypatch):
+ slaves_path = makedirs('sys/block/vdo0/slaves')
+ listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
+ monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
+ result = sorted(api._vdo_slaves(['vdo0']))
+ assert '/dev/mapper/vdo0' in result
+ assert 'vdo0' in result
+
+ def test_slaves_are_found(self, makedirs, listdir, monkeypatch):
+ slaves_path = makedirs('sys/block/vdo0/slaves')
+ makedirs('sys/block/vdo0/slaves/dm-4')
+ makedirs('dev/mapper/vdo0')
+ listdir(paths={'/sys/block/vdo0/slaves': slaves_path})
+ monkeypatch.setattr('ceph_volume.api.lvm.os.path.exists', lambda x, **kw: True)
+ result = sorted(api._vdo_slaves(['vdo0']))
+ assert '/dev/dm-4' in result
+ assert 'dm-4' in result
+
+
+class TestVDOParents(object):
+
+ def test_parents_are_found(self, makedirs, listdir):
+ block_path = makedirs('sys/block')
+ slaves_path = makedirs('sys/block/dm-4/slaves')
+ makedirs('sys/block/dm-4/slaves/dm-3')
+ listdir(paths={
+ '/sys/block/dm-4/slaves': slaves_path,
+ '/sys/block': block_path})
+ result = api._vdo_parents(['dm-3'])
+ assert '/dev/dm-4' in result
+ assert 'dm-4' in result
+
+ def test_parents_are_not_found(self, makedirs, listdir):
+ block_path = makedirs('sys/block')
+ slaves_path = makedirs('sys/block/dm-4/slaves')
+ makedirs('sys/block/dm-4/slaves/dm-5')
+ listdir(paths={
+ '/sys/block/dm-4/slaves': slaves_path,
+ '/sys/block': block_path})
+ result = api._vdo_parents(['dm-3'])
+ assert result == []
+
+
+class TestSplitNameParser(object):
+
+ def test_keys_are_parsed_without_prefix(self):
+ line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
+ result = api._splitname_parser(line)
+ assert result['VG_NAME'] == 'vg'
+ assert result['LV_NAME'] == 'lv'
+ assert result['LV_LAYER'] == ''
+
+ def test_vg_name_sans_mapper(self):
+ line = ["DM_VG_NAME='/dev/mapper/vg';DM_LV_NAME='lv';DM_LV_LAYER=''"]
+ result = api._splitname_parser(line)
+ assert '/dev/mapper' not in result['VG_NAME']
+
+
+class TestGetDeviceVgs(object):
+
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.api.lvm._output_parser')
+ def test_get_device_vgs_with_empty_pv(self, patched_output_parser, pcall):
+ patched_output_parser.return_value = [{'vg_name': ''}]
+ pcall.return_value = ('', '', '')
+ vgs = api.get_device_vgs('/dev/foo')
+ assert vgs == []
+
+class TestGetDeviceLvs(object):
+
+ @patch('ceph_volume.process.call')
+ @patch('ceph_volume.api.lvm._output_parser')
+ def test_get_device_lvs_with_empty_vg(self, patched_output_parser, pcall):
+ patched_output_parser.return_value = [{'lv_name': ''}]
+ pcall.return_value = ('', '', '')
+ vgs = api.get_device_lvs('/dev/foo')
+ assert vgs == []
+
+
+# NOTE: api.convert_filters_to_str() and api.convert_tags_to_str() should get
+# tested automatically while testing api.make_filters_lvmcmd_ready()
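+# The tests below only pin down ordering: plain filters first, the rendered
+# tags last. A hypothetical return value for filters plus tags might look
+# like this (the exact separators are an assumption, they are not asserted
+# here):
+#
+#     lv_name=lv1,lv_path=/dev/sda,tags={ceph.type=data,ceph.osd_id=0}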
+class TestMakeFiltersLVMCMDReady(object):
+
+ def test_with_no_filters_and_no_tags(self):
+ retval = api.make_filters_lvmcmd_ready(None, None)
+
+ assert isinstance(retval, str)
+ assert retval == ''
+
+ def test_with_filters_and_no_tags(self):
+ filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
+
+ retval = api.make_filters_lvmcmd_ready(filters, None)
+
+ assert isinstance(retval, str)
+ for k, v in filters.items():
+ assert k in retval
+ assert v in retval
+
+ def test_with_no_filters_and_with_tags(self):
+ tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
+
+ retval = api.make_filters_lvmcmd_ready(None, tags)
+
+ assert isinstance(retval, str)
+ assert 'tags' in retval
+ for k, v in tags.items():
+ assert k in retval
+ assert v in retval
+ assert retval.find('tags') < retval.find(k) < retval.find(v)
+
+ def test_with_filters_and_tags(self):
+ filters = {'lv_name': 'lv1', 'lv_path': '/dev/sda'}
+ tags = {'ceph.type': 'data', 'ceph.osd_id': '0'}
+
+ retval = api.make_filters_lvmcmd_ready(filters, tags)
+
+ assert isinstance(retval, str)
+ for f, t in zip(filters.items(), tags.items()):
+ assert f[0] in retval
+ assert f[1] in retval
+ assert t[0] in retval
+ assert t[1] in retval
+ assert retval.find(f[0]) < retval.find(f[1]) < \
+ retval.find('tags') < retval.find(t[0]) < retval.find(t[1])
+
+
+class TestGetPVs(object):
+
+ def test_get_pvs(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
+ vg_name='vg2')
+ pvs = [pv1, pv2]
+ stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name),
+ '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pvs_ = api.get_pvs()
+ assert len(pvs_) == len(pvs)
+ for pv, pv_ in zip(pvs, pvs_):
+ assert pv_.pv_name == pv.pv_name
+
+ def test_get_pvs_single_pv(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pvs = [pv1]
+ stdout = ['{};;;;;;'.format(pv1.pv_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pvs_ = api.get_pvs()
+ assert len(pvs_) == 1
+ assert pvs_[0].pv_name == pvs[0].pv_name
+
+ def test_get_pvs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_pvs() == []
+
+
+class TestGetVGs(object):
+
+ def test_get_vgs(self, monkeypatch):
+ vg1 = api.VolumeGroup(vg_name='vg1')
+ vg2 = api.VolumeGroup(vg_name='vg2')
+ vgs = [vg1, vg2]
+ stdout = ['{};;;;;;'.format(vg1.vg_name),
+ '{};;;;;;'.format(vg2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vgs_ = api.get_vgs()
+ assert len(vgs_) == len(vgs)
+ for vg, vg_ in zip(vgs, vgs_):
+ assert vg_.vg_name == vg.vg_name
+
+ def test_get_vgs_single_vg(self, monkeypatch):
+        vg1 = api.VolumeGroup(vg_name='vg')
+        vgs = [vg1]
+ stdout = ['{};;;;;;'.format(vg1.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vgs_ = api.get_vgs()
+ assert len(vgs_) == 1
+ assert vgs_[0].vg_name == vgs[0].vg_name
+
+ def test_get_vgs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_vgs() == []
+
+
+class TestGetLVs(object):
+
+ def test_get_lvs(self, monkeypatch):
+ lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1',
+ lv_name='lv1', vg_name='vg1')
+ lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2',
+ lv_name='lv2', vg_name='vg2')
+ lvs = [lv1, lv2]
+ stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name,
+ lv1.vg_name),
+ '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name,
+ lv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ lvs_ = api.get_lvs()
+ assert len(lvs_) == len(lvs)
+ for lv, lv_ in zip(lvs, lvs_):
+ assert lv.__dict__ == lv_.__dict__
+
+ def test_get_lvs_single_lv(self, monkeypatch):
+ stdout = ['ceph.type=data;/dev/vg/lv;lv;vg']
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+ lvs = []
+ lvs.append((api.Volume(lv_tags='ceph.type=data',
+ lv_path='/dev/vg/lv',
+ lv_name='lv', vg_name='vg')))
+
+ lvs_ = api.get_lvs()
+ assert len(lvs_) == len(lvs)
+ assert lvs[0].__dict__ == lvs_[0].__dict__
+
+ def test_get_lvs_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_lvs() == []
+
+
+class TestGetFirstPV(object):
+
+ def test_get_first_pv(self, monkeypatch):
+ pv1 = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ pv2 = api.PVolume(pv_name='/dev/sdb', pv_uuid='0001', pv_tags={},
+ vg_name='vg2')
+ stdout = ['{};{};{};{};;'.format(pv1.pv_name, pv1.pv_tags, pv1.pv_uuid, pv1.vg_name),
+ '{};{};{};{};;'.format(pv2.pv_name, pv2.pv_tags, pv2.pv_uuid, pv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pv_ = api.get_first_pv()
+ assert isinstance(pv_, api.PVolume)
+ assert pv_.pv_name == pv1.pv_name
+
+ def test_get_first_pv_single_pv(self, monkeypatch):
+ pv = api.PVolume(pv_name='/dev/sda', pv_uuid='0000', pv_tags={},
+ vg_name='vg1')
+ stdout = ['{};;;;;;'.format(pv.pv_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ pv_ = api.get_first_pv()
+ assert isinstance(pv_, api.PVolume)
+ assert pv_.pv_name == pv.pv_name
+
+ def test_get_first_pv_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ assert api.get_first_pv() == []
+
+
+class TestGetFirstVG(object):
+
+ def test_get_first_vg(self, monkeypatch):
+ vg1 = api.VolumeGroup(vg_name='vg1')
+ vg2 = api.VolumeGroup(vg_name='vg2')
+ stdout = ['{};;;;;;'.format(vg1.vg_name), '{};;;;;;'.format(vg2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vg_ = api.get_first_vg()
+ assert isinstance(vg_, api.VolumeGroup)
+ assert vg_.vg_name == vg1.vg_name
+
+ def test_get_first_vg_single_vg(self, monkeypatch):
+ vg = api.VolumeGroup(vg_name='vg')
+ stdout = ['{};;;;;;'.format(vg.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ vg_ = api.get_first_vg()
+ assert isinstance(vg_, api.VolumeGroup)
+ assert vg_.vg_name == vg.vg_name
+
+ def test_get_first_vg_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+ vg_ = api.get_first_vg()
+ assert vg_ == []
+
+
+class TestGetFirstLV(object):
+
+ def test_get_first_lv(self, monkeypatch):
+ lv1 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg1/lv1',
+ lv_name='lv1', vg_name='vg1')
+ lv2 = api.Volume(lv_tags='ceph.type=data', lv_path='/dev/vg2/lv2',
+ lv_name='lv2', vg_name='vg2')
+ stdout = ['{};{};{};{}'.format(lv1.lv_tags, lv1.lv_path, lv1.lv_name,
+ lv1.vg_name),
+ '{};{};{};{}'.format(lv2.lv_tags, lv2.lv_path, lv2.lv_name,
+ lv2.vg_name)]
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+
+ lv_ = api.get_first_lv()
+ assert isinstance(lv_, api.Volume)
+ assert lv_.lv_name == lv1.lv_name
+
+ def test_get_first_lv_single_lv(self, monkeypatch):
+ stdout = ['ceph.type=data;/dev/vg/lv;lv;vg']
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: (stdout, '', 0))
+ lv = api.Volume(lv_tags='ceph.type=data',
+ lv_path='/dev/vg/lv',
+ lv_name='lv', vg_name='vg')
+
+ lv_ = api.get_first_lv()
+ assert isinstance(lv_, api.Volume)
+ assert lv_.lv_name == lv.lv_name
+
+ def test_get_first_lv_empty(self, monkeypatch):
+ monkeypatch.setattr(api.process, 'call', lambda x,**kw: ('', '', 0))
+        assert api.get_first_lv() == []
diff --git a/src/ceph-volume/ceph_volume/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/conftest.py
new file mode 100644
index 00000000..2abedac3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/conftest.py
@@ -0,0 +1,295 @@
+import os
+import pytest
+from mock.mock import patch, PropertyMock, create_autospec
+from ceph_volume.api import lvm
+from ceph_volume.util import disk
+from ceph_volume.util import device
+from ceph_volume.util.constants import ceph_disk_guids
+from ceph_volume import conf, configuration
+
+
+class Capture(object):
+
+ def __init__(self, *a, **kw):
+ self.a = a
+ self.kw = kw
+ self.calls = []
+ self.return_values = kw.get('return_values', False)
+ self.always_returns = kw.get('always_returns', False)
+
+ def __call__(self, *a, **kw):
+ self.calls.append({'args': a, 'kwargs': kw})
+ if self.always_returns:
+ return self.always_returns
+ if self.return_values:
+ return self.return_values.pop()
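+
+    # Usage sketch: a Capture instance is swapped in for a callable and
+    # records every invocation, e.g.
+    #   capture = Capture(always_returns=([], [], 0))
+    #   capture('lvs', run=True)   # returns ([], [], 0)
+    #   capture.calls[0]           # {'args': ('lvs',), 'kwargs': {'run': True}}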
+
+
+class Factory(object):
+
+ def __init__(self, **kw):
+ for k, v in kw.items():
+ setattr(self, k, v)
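+
+    # e.g. Factory(path='/dev/sda', exists=True) yields an object where
+    # .path == '/dev/sda' and .exists is True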
+
+
+@pytest.fixture
+def factory():
+ return Factory
+
+
+@pytest.fixture
+def capture():
+ return Capture()
+
+@pytest.fixture
+def mock_lv_device_generator():
+ def mock_lv():
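+        # 21474836480 bytes == 20 GiB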
+ size = 21474836480
+ dev = create_autospec(device.Device)
+ dev.lv_name = 'lv'
+ dev.vg_name = 'vg'
+ dev.path = '{}/{}'.format(dev.vg_name, dev.lv_name)
+ dev.used_by_ceph = False
+ dev.vg_size = [size]
+ dev.vg_free = dev.vg_size
+ dev.lvs = [lvm.Volume(vg_name=dev.vg_name, lv_name=dev.lv_name, lv_size=size, lv_tags='')]
+ return dev
+ return mock_lv
+
+
+@pytest.fixture
+def mock_devices_available():
+ dev = create_autospec(device.Device)
+ dev.path = '/dev/foo'
+ dev.available_lvm = True
+ dev.vg_size = [21474836480]
+ dev.vg_free = dev.vg_size
+ return [dev]
+
+@pytest.fixture
+def mock_device_generator():
+ def mock_device():
+ dev = create_autospec(device.Device)
+ dev.path = '/dev/foo'
+ dev.available_lvm = True
+ dev.vg_size = [21474836480]
+ dev.vg_free = dev.vg_size
+ dev.lvs = []
+ return dev
+ return mock_device
+
+
+@pytest.fixture(params=range(1,11))
+def osds_per_device(request):
+ return request.param
+
+
+@pytest.fixture
+def fake_run(monkeypatch):
+ fake_run = Capture()
+ monkeypatch.setattr('ceph_volume.process.run', fake_run)
+ return fake_run
+
+
+@pytest.fixture
+def fake_call(monkeypatch):
+ fake_call = Capture(always_returns=([], [], 0))
+ monkeypatch.setattr('ceph_volume.process.call', fake_call)
+ return fake_call
+
+
+@pytest.fixture
+def fakedevice(factory):
+ def apply(**kw):
+ params = dict(
+ path='/dev/sda',
+ abspath='/dev/sda',
+ lv_api=None,
+ pvs_api=[],
+ disk_api={},
+ sys_api={},
+ exists=True,
+ is_lvm_member=True,
+ )
+ params.update(dict(kw))
+ params['lvm_size'] = disk.Size(b=params['sys_api'].get("size", 0))
+ return factory(**params)
+ return apply
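+
+# usage sketch: fakedevice(sys_api={'size': 1073741824}) returns a stub device
+# whose lvm_size is derived from sys_api['size'] (1 GiB in this example)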
+
+
+@pytest.fixture
+def stub_call(monkeypatch):
+ """
+ Monkeypatches process.call, so that a caller can add behavior to the response
+ """
+ def apply(return_values):
+ if isinstance(return_values, tuple):
+ return_values = [return_values]
+ stubbed_call = Capture(return_values=return_values)
+ monkeypatch.setattr('ceph_volume.process.call', stubbed_call)
+ return stubbed_call
+
+ return apply
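+
+# usage sketch: stub_call(('out', 'err', 0)) patches process.call so its next
+# invocation returns that tuple; passing a list queues several return values
+# (consumed from the end via list.pop())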
+
+
+@pytest.fixture(autouse=True)
+def reset_cluster_name(request, monkeypatch):
+ """
+    The globally available ``ceph_volume.conf.cluster`` might get mangled in
+    tests; make sure that it gets reset after every test, preventing pollution
+    from leaking into later tests.
+ """
+ def fin():
+ conf.cluster = None
+ try:
+ os.environ.pop('CEPH_CONF')
+ except KeyError:
+ pass
+ request.addfinalizer(fin)
+
+
+@pytest.fixture
+def conf_ceph(monkeypatch):
+ """
+ Monkeypatches ceph_volume.conf.ceph, which is meant to parse/read
+ a ceph.conf. The patching is naive, it allows one to set return values for
+ specific method calls.
+ """
+ def apply(**kw):
+ stub = Factory(**kw)
+ monkeypatch.setattr(conf, 'ceph', stub)
+ return stub
+ return apply
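+
+# usage sketch (the method name is illustrative): conf_ceph(get_safe=lambda *a: 5)
+# makes conf.ceph.get_safe(...) return 5 inside the test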
+
+
+@pytest.fixture
+def conf_ceph_stub(monkeypatch, tmpfile):
+ """
+    Monkeypatches ceph_volume.conf.ceph with contents from a string that are
+    written to a temporary file and then fed through the same ceph.conf
+    loading mechanisms for testing. Unlike ``conf_ceph``, which is just a fake,
+    this actually loads values as they would appear in a ceph.conf file.
+
+    This is useful when more complex ceph.conf contents are needed. For simply
+    validating key/value behavior, ``conf_ceph`` is better suited.
+ """
+ def apply(contents):
+ conf_path = tmpfile(contents=contents)
+ parser = configuration.load(conf_path)
+ monkeypatch.setattr(conf, 'ceph', parser)
+ return parser
+ return apply
+
+
+@pytest.fixture
+def is_root(monkeypatch):
+ """
+ Patch ``os.getuid()`` so that ceph-volume's decorators that ensure a user
+ is root (or is sudoing to superuser) can continue as-is
+ """
+ monkeypatch.setattr('os.getuid', lambda: 0)
+
+
+@pytest.fixture
+def tmpfile(tmpdir):
+ """
+    Return a callable that creates a temporary file, optionally filled with
+    contents, and returns the absolute path to that file
+ """
+ def generate_file(name='file', contents='', directory=None):
+ directory = directory or str(tmpdir)
+ path = os.path.join(directory, name)
+ with open(path, 'w') as fp:
+ fp.write(contents)
+ return path
+ return generate_file
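+
+# e.g. tmpfile(contents='[global]\nfsid = 1234') writes that string to a file
+# under pytest's tmpdir and returns the file's absolute path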
+
+
+@pytest.fixture
+def disable_kernel_queries(monkeypatch):
+    """
+    Speed up tests by stubbing out the kernel queries made by Device and Disk
+    """
+ monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: {})
+ monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: {})
+
+
+@pytest.fixture(params=[
+ '', 'ceph data', 'ceph journal', 'ceph block',
+ 'ceph block.wal', 'ceph block.db', 'ceph lockbox'])
+def ceph_partlabel(request):
+ return request.param
+
+
+@pytest.fixture(params=list(ceph_disk_guids.keys()))
+def ceph_parttype(request):
+ return request.param
+
+
+@pytest.fixture
+def lsblk_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
+ lambda path: {'TYPE': 'disk', 'PARTLABEL': ceph_partlabel})
+    # set blkid here too so that membership detection can fall back to
+    # PARTTYPE when PARTLABEL is empty
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
+ lambda path: {'TYPE': 'disk',
+ 'PARTLABEL': '',
+ 'PARTTYPE': ceph_parttype})
+
+
+@pytest.fixture
+def blkid_ceph_disk_member(monkeypatch, request, ceph_partlabel, ceph_parttype):
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
+ lambda path: {'TYPE': 'disk',
+ 'PARTLABEL': ceph_partlabel,
+ 'PARTTYPE': ceph_parttype})
+
+
+@pytest.fixture(params=[
+ ('gluster partition', 'gluster partition'),
+ # falls back to blkid
+ ('', 'gluster partition'),
+ ('gluster partition', ''),
+])
+def device_info_not_ceph_disk_member(monkeypatch, request):
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk",
+ lambda path: {'TYPE': 'disk',
+ 'PARTLABEL': request.param[0]})
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid",
+ lambda path: {'TYPE': 'disk',
+ 'PARTLABEL': request.param[1]})
+
+@pytest.fixture
+def patched_get_block_devs_lsblk():
+ with patch('ceph_volume.util.disk.get_block_devs_lsblk') as p:
+ yield p
+
+@pytest.fixture
+def patch_bluestore_label():
+ with patch('ceph_volume.util.device.Device.has_bluestore_label',
+ new_callable=PropertyMock) as p:
+ p.return_value = False
+ yield p
+
+@pytest.fixture
+def device_info(monkeypatch, patch_bluestore_label):
+ def apply(devices=None, lsblk=None, lv=None, blkid=None, udevadm=None,
+ has_bluestore_label=False):
+ devices = devices if devices else {}
+ lsblk = lsblk if lsblk else {}
+ blkid = blkid if blkid else {}
+ udevadm = udevadm if udevadm else {}
+ lv = Factory(**lv) if lv else None
+ monkeypatch.setattr("ceph_volume.sys_info.devices", {})
+ monkeypatch.setattr("ceph_volume.util.device.disk.get_devices", lambda: devices)
+ if not devices:
+ monkeypatch.setattr("ceph_volume.util.device.lvm.get_first_lv", lambda filters: lv)
+ else:
+ monkeypatch.setattr("ceph_volume.util.device.lvm.get_device_lvs",
+ lambda path: [lv])
+ monkeypatch.setattr("ceph_volume.util.device.disk.lsblk", lambda path: lsblk)
+ monkeypatch.setattr("ceph_volume.util.device.disk.blkid", lambda path: blkid)
+ monkeypatch.setattr("ceph_volume.util.disk.udevadm_property", lambda *a, **kw: udevadm)
+ return apply
diff --git a/src/ceph-volume/ceph_volume/tests/devices/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
new file mode 100644
index 00000000..33e0ed32
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
@@ -0,0 +1,414 @@
+import pytest
+from copy import deepcopy
+from ceph_volume.devices.lvm import activate
+from ceph_volume.api import lvm as api
+from ceph_volume.tests.conftest import Capture
+
+
+class Args(object):
+
+ def __init__(self, **kw):
+ # default flags
+ self.bluestore = False
+ self.filestore = False
+ self.no_systemd = False
+ self.auto_detect_objectstore = None
+ for k, v in kw.items():
+ setattr(self, k, v)
+
+
+class TestActivate(object):
+
+    # These tests are very functional, hence the heavy patching: it is hard to
+    # test the negative side effects with an actual functional run, so we must
+    # set up a perfect scenario to check that activation really works
+    # with/without an osd_id.
+ def test_no_osd_id_matches_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', filestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
+ monkeypatch.setattr(activate, 'activate_bluestore', capture)
+ args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
+ activate.Activate([]).activate(args)
+ assert capture.calls[0]['args'][0] == [FooVolume]
+
+ def test_no_osd_id_no_matching_fsid(self, is_root, monkeypatch, capture):
+ FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1111")
+ volumes = []
+ volumes.append(FooVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: [])
+ monkeypatch.setattr(api, 'get_first_lv', lambda **kwargs: [])
+ monkeypatch.setattr(activate, 'activate_filestore', capture)
+
+ args = Args(osd_id=None, osd_fsid='2222')
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_filestore_no_systemd(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+ "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ filestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_filestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+                "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_filestore_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.device_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ JournalVolume = api.Volume(
+ lv_name='journal',
+ lv_path='/dev/vg/journal',
+ lv_uuid='000',
+ lv_tags=','.join([
+ "ceph.cluster_name=ceph", "ceph.journal_device=/dev/vg/journal",
+ "ceph.journal_uuid=000", "ceph.type=journal",
+                "ceph.osd_id=0", "ceph.osd_fsid=1234"])
+ )
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_uuid='001',
+ lv_tags="ceph.cluster_name=ceph,ceph.journal_device=/dev/vg/" + \
+ "journal,ceph.journal_uuid=000,ceph.type=data," + \
+ "ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ volumes.append(JournalVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ filestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+ def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.block_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
+ bluestore=True, auto_detect_objectstore=True)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls == []
+ assert fake_start_osd.calls == []
+
+ def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ fake_enable = Capture()
+ fake_start_osd = Capture()
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted',
+ lambda *a, **kw: True)
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
+ True)
+ monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
+ monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ DataVolume = api.Volume(
+ lv_name='data',
+ lv_path='/dev/vg/data',
+ lv_tags="ceph.cluster_name=ceph,,ceph.journal_uuid=000," + \
+ "ceph.type=block,ceph.osd_id=0,ceph.osd_fsid=1234")
+ volumes = []
+ volumes.append(DataVolume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
+
+ args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
+ bluestore=True, auto_detect_objectstore=False)
+ activate.Activate([]).activate(args)
+ assert fake_enable.calls != []
+ assert fake_start_osd.calls != []
+
+class TestActivateFlags(object):
+
+ def test_default_objectstore(self, capture):
+ args = ['0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is True
+
+ def test_uses_filestore(self, capture):
+ args = ['--filestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is True
+ assert parsed_args.bluestore is False
+
+ def test_uses_bluestore(self, capture):
+ args = ['--bluestore', '0', 'asdf-ljh-asdf']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ parsed_args = capture.calls[0]['args'][0]
+ assert parsed_args.filestore is False
+ assert parsed_args.bluestore is True
+
+
+class TestActivateAll(object):
+
+ def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {})
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'Was unable to find any OSDs to activate' in err
+ assert 'Verify OSDs are present with ' in err
+
+ def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.main()
+ out, err = capsys.readouterr()
+ assert 'a8789a96ce8b process is active. Skipping activation' in err
+ assert 'b8218eaa1634 process is active. Skipping activation' in err
+
+ def test_detects_osds_to_activate(self, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False)
+ args = ['--all']
+ activation = activate.Activate(args)
+ activation.activate = capture
+ activation.main()
+ calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
+ assert calls[0]['kwargs']['osd_id'] == '0'
+ assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
+ assert calls[1]['kwargs']['osd_id'] == '1'
+ assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+
+#
+# Activate All fixture
+#
+
+direct_report = {
+ "0": [
+ {
+ "lv_name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "lv_tags": "ceph.block_device=/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634,ceph.block_uuid=6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=957d22b7-24ce-466a-9883-b8218eaa1634,ceph.osd_id=0,ceph.type=block",
+ "lv_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "name": "osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "path": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44/osd-block-957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.block_uuid": "6MixOd-2Q1I-f8K3-PPOq-UJGV-L3A0-0XwUm4",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "None",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "957d22b7-24ce-466a-9883-b8218eaa1634",
+ "ceph.osd_id": "0",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-d4962338-46ff-4cd5-8ea6-c033dbdc5b44"
+ }
+ ],
+ "1": [
+ {
+ "lv_name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "lv_tags": "ceph.block_device=/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.block_uuid=1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=d4962338-46ff-4cd5-8ea6-c033dbdc5b44,ceph.cluster_name=ceph,ceph.crush_device_class=None,ceph.encrypted=0,ceph.osd_fsid=d0f3e4ad-e52a-4520-afc0-a8789a96ce8b,ceph.osd_id=1,ceph.type=block",
+ "lv_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "name": "osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "path": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "tags": {
+ "ceph.block_device": "/dev/ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532/osd-block-d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.block_uuid": "1igwLb-ZlmV-eLgp-hapx-c1Hr-M5gz-sHjnyW",
+ "ceph.cephx_lockbox_secret": "",
+ "ceph.cluster_fsid": "d4962338-46ff-4cd5-8ea6-c033dbdc5b44",
+ "ceph.cluster_name": "ceph",
+ "ceph.crush_device_class": "None",
+ "ceph.encrypted": "0",
+ "ceph.osd_fsid": "d0f3e4ad-e52a-4520-afc0-a8789a96ce8b",
+ "ceph.osd_id": "1",
+ "ceph.type": "block"
+ },
+ "type": "block",
+ "vg_name": "ceph-7538bcf0-f155-4d3f-a9fd-d8b15905e532"
+ }
+ ]
+}
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
new file mode 100644
index 00000000..7c968ae8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
@@ -0,0 +1,280 @@
+import pytest
+import json
+import random
+
+from argparse import ArgumentError
+from mock import MagicMock, patch
+
+from ceph_volume.devices.lvm import batch
+from ceph_volume.util import arg_validators
+
+
+class TestBatch(object):
+
+ def test_batch_instance(self, is_root):
+ b = batch.Batch([])
+ b.main()
+
+ def test_disjoint_device_lists(self, factory):
+ device1 = factory(used_by_ceph=False, available=True, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, abspath="/dev/sdb")
+ devices = [device1, device2]
+ db_devices = [device2]
+ with pytest.raises(Exception) as disjoint_ex:
+ batch.ensure_disjoint_device_lists(devices, db_devices)
+ assert 'Device lists are not disjoint' in str(disjoint_ex.value)
+
+ @patch('ceph_volume.util.arg_validators.Device')
+ def test_reject_partition(self, mocked_device):
+ mocked_device.return_value = MagicMock(
+ is_partition=True,
+ has_gpt_headers=False,
+ )
+ with pytest.raises(ArgumentError):
+ arg_validators.ValidBatchDevice()('foo')
+
+ @pytest.mark.parametrize('format_', ['pretty', 'json', 'json-pretty'])
+ def test_report(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # just ensure reporting works
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ b.report(plan)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty(self, format_, factory, conf_ceph_stub, mock_device_generator):
+ # ensure json reports are valid when empty
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = []
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=[],
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports are valid when a fast (db) device is unavailable
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=[],
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('format_', ['json', 'json-pretty'])
+ def test_json_report_valid_empty_unavailable_very_fast(self, format_, factory, conf_ceph_stub, mock_device_generator):
+        # ensure json reports are valid when a very fast (wal) device is unavailable
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ devs = [mock_device_generator() for _ in range(5)]
+ fast_devs = [mock_device_generator()]
+ very_fast_devs = [mock_device_generator()]
+ very_fast_devs[0].available_lvm = False
+ args = factory(data_slots=1,
+ osds_per_device=1,
+ osd_ids=[],
+ report=True,
+ format=format_,
+ devices=devs,
+ db_devices=fast_devs,
+ wal_devices=very_fast_devs,
+ bluestore=True,
+ block_db_size="1G",
+ dmcrypt=True,
+ )
+ b = batch.Batch([])
+ plan = b.get_plan(args)
+ b.args = args
+ report = b._create_report(plan)
+ json.loads(report)
+
+ @pytest.mark.parametrize('rota', [0, 1])
+ def test_batch_sort_full(self, factory, rota):
+ device1 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=rota, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+ filestore=False,
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 3
+
+ @pytest.mark.parametrize('objectstore', ['bluestore', 'filestore'])
+ def test_batch_sort_mixed(self, factory, objectstore):
+ device1 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sda")
+ device2 = factory(used_by_ceph=False, available=True, rotational=1, abspath="/dev/sdb")
+ device3 = factory(used_by_ceph=False, available=True, rotational=0, abspath="/dev/sdc")
+ devices = [device1, device2, device3]
+ args = factory(report=True,
+ devices=devices,
+                       filestore=(objectstore == 'filestore'),
+ )
+ b = batch.Batch([])
+ b.args = args
+ b._sort_rotational_disks()
+ assert len(b.args.devices) == 2
+ if objectstore == 'bluestore':
+ assert len(b.args.db_devices) == 1
+ else:
+ assert len(b.args.journal_devices) == 1
+
+ def test_get_physical_osds_return_len(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ assert len(osds) == len(mock_devices_available) * osds_per_device
+
+ def test_get_physical_osds_rel_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd in osds:
+ assert osd.data[1] == 1.0 / osds_per_device
+
+ def test_get_physical_osds_abs_size(self, factory,
+ mock_devices_available,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ osds = batch.get_physical_osds(mock_devices_available, args)
+ for osd, dev in zip(osds, mock_devices_available):
+ assert osd.data[2] == int(dev.vg_size[0] / osds_per_device)
+
+ def test_get_physical_osds_osd_ids(self, factory,
+ mock_devices_available,
+ osds_per_device):
+ pass
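+        # placeholder: osd_ids handling is not exercised yet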
+
+ def test_get_physical_fast_allocs_length(self, factory,
+ conf_ceph_stub,
+ mock_devices_available):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ fast = batch.get_physical_fast_allocs(mock_devices_available,
+ 'block_db', 2, 2, args)
+ assert len(fast) == 2
+
+ @pytest.mark.parametrize('occupied_prior', range(7))
+ @pytest.mark.parametrize('slots,num_devs',
+ [l for sub in [list(zip([x]*x, range(1, x + 1))) for x in range(1,7)] for l in sub])
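+    # the parametrize above expands to (slots, num_devs) pairs where
+    # 1 <= num_devs <= slots <= 6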
+ def test_get_physical_fast_allocs_length_existing(self,
+ num_devs,
+ slots,
+ occupied_prior,
+ factory,
+ conf_ceph_stub,
+ mock_device_generator):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ occupied_prior = min(occupied_prior, slots)
+ devs = [mock_device_generator() for _ in range(num_devs)]
+ already_assigned = 0
+ while already_assigned < occupied_prior:
+ dev_i = random.randint(0, num_devs - 1)
+ dev = devs[dev_i]
+ if len(dev.lvs) < occupied_prior:
+ dev.lvs.append('foo')
+ dev.path = '/dev/bar'
+ already_assigned = sum([len(d.lvs) for d in devs])
+ args = factory(block_db_slots=None, get_block_db_size=None)
+ expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
+ fast = batch.get_physical_fast_allocs(devs,
+ 'block_db', slots,
+ expected_num_osds, args)
+ assert len(fast) == expected_num_osds
+ expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
+ assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
+ assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
+
+ def test_get_lvm_osds_return_len(self, factory,
+ mock_lv_device_generator,
+ conf_ceph_stub,
+ osds_per_device):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ args = factory(data_slots=1, osds_per_device=osds_per_device,
+ osd_ids=[], dmcrypt=False)
+ mock_lvs = [mock_lv_device_generator()]
+ osds = batch.get_lvm_osds(mock_lvs, args)
+ assert len(osds) == 1
+
+
+class TestBatchOsd(object):
+
+ def test_osd_class_ctor(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ assert osd.data == batch.Batch.OSD.VolSpec('/dev/data',
+ 1,
+ '5G',
+ 1,
+ 'data')
+
+    def test_add_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_fast_device('/dev/db', 1, '5G', 1, 'block_db')
+ assert osd.fast == batch.Batch.OSD.VolSpec('/dev/db',
+ 1,
+ '5G',
+ 1,
+ 'block_db')
+
+ def test_add_very_fast(self):
+ osd = batch.Batch.OSD('/dev/data', 1, '5G', 1, 1, None)
+ osd.add_very_fast_device('/dev/wal', 1, '5G', 1)
+ assert osd.very_fast == batch.Batch.OSD.VolSpec('/dev/wal',
+ 1,
+ '5G',
+ 1,
+ 'block_wal')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
new file mode 100644
index 00000000..fe792d5a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_common.py
@@ -0,0 +1,8 @@
+from ceph_volume.devices.lvm import common
+
+
+class TestCommon(object):
+
+ def test_get_default_args_smoke(self):
+ default_args = common.get_default_args()
+ assert default_args
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
new file mode 100644
index 00000000..994038f3
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_create.py
@@ -0,0 +1,48 @@
+import pytest
+from ceph_volume.devices import lvm
+
+
+class TestCreate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.create.Create([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Create an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ def test_excludes_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ def test_excludes_block_and_journal_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.create.Create(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
new file mode 100644
index 00000000..4b8304ce
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_deactivate.py
@@ -0,0 +1,59 @@
+import pytest
+from mock.mock import patch
+from ceph_volume.api import lvm
+from ceph_volume.devices.lvm import deactivate
+
+class TestDeactivate(object):
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ def test_no_osd(self, p_get_lvs):
+ p_get_lvs.return_value = []
+ with pytest.raises(StopIteration):
+ deactivate.deactivate_osd(0)
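+        # with no matching lvs, deactivate_osd is expected to surface a
+        # StopIteration, presumably from an internal next() over the lv list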
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_id(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 0))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ def test_unmount_tmpfs_called_osd_uuid(self, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=0,ceph.osd_id=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(None, 0)
+ p_u_tmpfs.assert_called_with(
+ '/var/lib/ceph/osd/{}-{}'.format('foo', 1))
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_no_crypt_no_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_id=0,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+
+ @patch("ceph_volume.devices.lvm.deactivate.get_lvs_by_tag")
+ @patch("ceph_volume.util.system.unmount_tmpfs")
+ @patch("ceph_volume.util.encryption.dmcrypt_close")
+ def test_crypt_dmclose(self, p_dm_close, p_u_tmpfs, p_get_lvs):
+ FooVolume = lvm.Volume(
+ lv_name='foo', lv_path='/dev/vg/foo', lv_uuid='123',
+ lv_tags="ceph.osd_id=0,ceph.encrypted=1,ceph.cluster_name=foo,ceph.type=data")
+ p_get_lvs.return_value = [FooVolume]
+
+ deactivate.deactivate_osd(0)
+ p_dm_close.assert_called_with('123')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
new file mode 100644
index 00000000..cf4b68c7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_listing.py
@@ -0,0 +1,265 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+
+# TODO: add tests for the following commands -
+# ceph-volume list
+# ceph-volume list <path-to-pv>
+# ceph-volume list <path-to-vg>
+# ceph-volume list <path-to-lv>
+
+class TestReadableTag(object):
+
+ def test_dots_get_replaced(self):
+ result = lvm.listing.readable_tag('ceph.foo')
+ assert result == 'foo'
+
+ def test_underscores_are_replaced_with_spaces(self):
+ result = lvm.listing.readable_tag('ceph.long_tag')
+ assert result == 'long tag'
+
+
+class TestPrettyReport(object):
+
+ def test_is_empty(self, capsys):
+ lvm.listing.pretty_report({})
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '\n'
+
+ def test_type_and_path_are_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '[data] /dev/sda1' in stdout
+
+ def test_osd_id_header_is_reported(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '====== osd.0 =======' in stdout
+
+ def test_tags_are_included(self, capsys):
+ lvm.listing.pretty_report(
+ {0: [{
+ 'type': 'data',
+ 'path': '/dev/sda1',
+ 'tags': {'ceph.osd_id': '0'},
+ 'devices': ['/dev/sda'],
+ }]}
+ )
+ stdout, stderr = capsys.readouterr()
+ assert 'osd id' in stdout
+
+ def test_devices_are_comma_separated(self, capsys):
+ lvm.listing.pretty_report({0: [
+ {'type': 'data', 'path': '/dev/sda1', 'devices': ['/dev/sda', '/dev/sdb1']}
+ ]})
+ stdout, stderr = capsys.readouterr()
+ assert '/dev/sda,/dev/sdb1' in stdout
+
+
+class TestList(object):
+
+    def test_empty_full_json_zero_exit_status(self, is_root, factory, capsys):
+ args = factory(format='json', device=None)
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+    def test_empty_device_json_zero_exit_status(self, is_root, factory, capsys):
+ args = factory(format='json', device='/dev/sda1')
+ lvm.listing.List([]).list(args)
+ stdout, stderr = capsys.readouterr()
+ assert stdout == '{}\n'
+
+ def test_empty_full_zero_exit_status(self, is_root, factory):
+ args = factory(format='pretty', device=None)
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+ def test_empty_device_zero_exit_status(self, is_root, factory):
+ args = factory(format='pretty', device='/dev/sda1')
+ with pytest.raises(SystemExit):
+ lvm.listing.List([]).list(args)
+
+class TestFullReport(object):
+
+ def test_no_ceph_lvs(self, monkeypatch):
+        # ceph lvs are detected by looking at their tags
+ osd = api.Volume(lv_name='volume1', lv_path='/dev/VolGroup/lv',
+ lv_tags={})
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result == {}
+
+ def test_ceph_data_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name='VolGroup', lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(lvm.listing.api, 'get_first_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+
+ def test_ceph_journal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ journal_tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=journal'
+ pv = api.PVolume(pv_name='/dev/sda1', pv_tags={}, pv_uuid="0000",
+ vg_name="VolGroup", lv_uuid="aaaa")
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ journal = api.Volume(
+ lv_name='journal', lv_uuid='x', lv_tags=journal_tags,
+ lv_path='/dev/VolGroup/journal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(journal)
+        monkeypatch.setattr(lvm.listing.api, 'get_first_pv', lambda **kwargs: pv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'journal'
+
+ def test_ceph_wal_lv_reported(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=data'
+ wal_tags = 'ceph.osd_id=0,ceph.wal_uuid=x,ceph.type=wal'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ wal = api.Volume(lv_name='wal', lv_uuid='x', lv_tags=wal_tags,
+ lv_path='/dev/VolGroup/wal', vg_name='VolGroup')
+ volumes = []
+ volumes.append(osd)
+ volumes.append(wal)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][0]['name'] == 'volume1'
+ assert result['0'][1]['name'] == 'wal'
+
+ @pytest.mark.parametrize('type_', ['journal', 'db', 'wal'])
+ def test_physical_2nd_device_gets_reported(self, type_, monkeypatch):
+ tags = ('ceph.osd_id=0,ceph.{t}_uuid=x,ceph.type=data,'
+ 'ceph.{t}_device=/dev/sda1').format(t=type_)
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='VolGroup', lv_path='/dev/VolGroup/lv')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [osd])
+
+ result = lvm.listing.List([]).full_report()
+ assert result['0'][1]['path'] == '/dev/sda1'
+ assert result['0'][1]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][1]['type'] == type_
+
+
+class TestSingleReport(object):
+
+ def test_not_a_ceph_lv(self, monkeypatch):
+        # ceph lvs are detected by looking at their tags
+ lv = api.Volume(lv_name='lv', lv_tags={}, lv_path='/dev/VolGroup/lv',
+ vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv])
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result == {}
+
+ def test_report_a_ceph_lv(self, monkeypatch):
+        # ceph lvs are detected by looking at their tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ result = lvm.listing.List([]).single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
+
+ def test_report_a_ceph_journal_device(self, monkeypatch):
+        # ceph lvs are detected by looking at their tags
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data,' + \
+ 'ceph.journal_device=/dev/sda1'
+ lv = api.Volume(lv_name='lv', lv_uuid='aaa', lv_tags=tags,
+ lv_path='/dev/VolGroup/lv', vg_name='VolGroup')
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ [lv] if 'tags' in kwargs else [])
+
+ result = lvm.listing.List([]).single_report('/dev/sda1')
+ assert result['0'][0]['tags'] == {'PARTUUID': 'x'}
+ assert result['0'][0]['type'] == 'journal'
+ assert result['0'][0]['path'] == '/dev/sda1'
+
+ def test_report_a_ceph_lv_with_devices(self, monkeypatch):
+ pvolumes = []
+
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ pv1 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sda1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pv2 = api.PVolume(vg_name="VolGroup", pv_name='/dev/sdb1',
+ pv_uuid='', pv_tags={}, lv_uuid="aaaa")
+ pvolumes.append(pv1)
+ pvolumes.append(pv2)
+
+ volumes = []
+        lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes.append(lv)
+
+ monkeypatch.setattr(lvm.listing.api, 'get_pvs', lambda **kwargs:
+ pvolumes)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sda1', 'pv_tags': '', 'pv_uuid': ''},
+ {'lv_uuid': 'aaaa', 'pv_name': '/dev/sdb1', 'pv_tags': '', 'pv_uuid': ''},
+ ]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == ['/dev/sda1', '/dev/sdb1']
+
+ def test_report_a_ceph_lv_with_no_matching_devices(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.type=data'
+ lv = api.Volume(lv_name='lv', vg_name='VolGroup', lv_uuid='aaaa',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(lv)
+ monkeypatch.setattr(lvm.listing.api, 'get_lvs', lambda **kwargs:
+ volumes)
+
+ listing = lvm.listing.List([])
+ listing._pvs = [
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sda1', 'pv_tags': '',
+ 'pv_uuid': ''},
+ {'lv_uuid': 'ffff', 'pv_name': '/dev/sdb1', 'pv_tags': '',
+ 'pv_uuid': ''}]
+
+ result = listing.single_report('VolGroup/lv')
+ assert result['0'][0]['name'] == 'lv'
+ assert result['0'][0]['lv_tags'] == tags
+ assert result['0'][0]['path'] == '/dev/VolGroup/lv'
+ assert result['0'][0]['devices'] == []
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
new file mode 100644
index 00000000..70915a0f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
@@ -0,0 +1,174 @@
+import pytest
+from ceph_volume.devices import lvm
+from ceph_volume.api import lvm as api
+from mock.mock import patch, Mock
+
+
+class TestLVM(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use LVM and LVM-based technologies to deploy' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and mount' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ lvm.main.LVM([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format an LVM device' in stdout
+
+
+class TestPrepareDevice(object):
+
+ def test_cannot_use_device(self, factory):
+ args = factory(data='/dev/var/foo')
+ with pytest.raises(RuntimeError) as error:
+ p = lvm.prepare.Prepare([])
+ p.args = args
+ p.prepare_data_device('data', '0')
+ assert 'Cannot use device (/dev/var/foo)' in str(error.value)
+ assert 'A vg/lv path or an existing device is needed' in str(error.value)
+
+
+class TestGetClusterFsid(object):
+
+ def test_fsid_is_passed_in(self, factory):
+ args = factory(cluster_fsid='aaaa-1111')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = args
+ assert prepare_obj.get_cluster_fsid() == 'aaaa-1111'
+
+ def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid = bbbb-2222')
+ prepare_obj = lvm.prepare.Prepare([])
+ prepare_obj.args = factory(cluster_fsid=None)
+ assert prepare_obj.get_cluster_fsid() == 'bbbb-2222'
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Use the filestore objectstore' in stdout
+ assert 'Use the bluestore objectstore' in stdout
+ assert 'A physical device or logical' in stdout
+
+ def test_excludes_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=['--data', '/dev/sdfoo', '--filestore', '--bluestore']).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --filestore (filestore) with --bluestore (bluestore)'
+ assert expected in stderr
+
+ def test_excludes_other_filestore_bluestore_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --bluestore (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ def test_excludes_block_and_journal_flags(self, capsys, device_info):
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.prepare.Prepare(argv=[
+ '--bluestore', '--data', '/dev/sdfoo', '--block.db', 'vg/ceph1',
+ '--journal', '/dev/sf14',
+ ]).main()
+ stdout, stderr = capsys.readouterr()
+ expected = 'Cannot use --block.db (bluestore) with --journal (filestore)'
+ assert expected in stderr
+
+ def test_journal_is_required_with_filestore(self, is_root, monkeypatch, device_info):
+ monkeypatch.setattr("os.path.exists", lambda path: True)
+ device_info()
+ with pytest.raises(SystemExit) as error:
+ lvm.prepare.Prepare(argv=['--filestore', '--data', '/dev/sdfoo']).main()
+ expected = '--journal is required when using --filestore'
+ assert expected in str(error.value)
+
+ @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
+ def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
+ m_is_ceph_device.return_value = True
+ with pytest.raises(RuntimeError) as error:
+ prepare = lvm.prepare.Prepare(argv=[])
+ prepare.args = Mock()
+ prepare.args.data = '/dev/sdfoo'
+ prepare.get_lv = Mock()
+ prepare.safe_prepare()
+ expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo')
+ assert expected in str(error.value)
+
+ def test_setup_device_device_name_is_none(self):
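+ # a device name of None should short-circuit: empty path and uuid, tags unchanged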
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None)
+ assert result == ('', '', {'ceph.type': 'data'})
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv')
+ def test_setup_device_lv_passed(self, m_get_first_lv, m_set_tags):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_get_first_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.api.create_lv')
+ @patch('ceph_volume.api.lvm.Volume.set_tags')
+ @patch('ceph_volume.util.disk.is_device')
+ def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv):
+ fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
+ m_is_device.return_value = True
+ m_create_lv.return_value = fake_volume
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/fake-path'})
+
+ @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid')
+ @patch('ceph_volume.devices.lvm.prepare.api.get_first_lv')
+ def test_setup_device_partition_passed(self, m_get_first_lv, m_get_ptuuid):
+ m_get_first_lv.side_effect = ValueError()
+ m_get_ptuuid.return_value = 'fake-uuid'
+ result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+
+ assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data',
+ 'ceph.vdo': '0',
+ 'ceph.data_uuid': 'fake-uuid',
+ 'ceph.data_device': '/dev/sdx'})
+
+
+class TestActivate(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by discovering them with' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.activate.Activate(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+ assert 'positional arguments' in stdout
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
new file mode 100644
index 00000000..b5280f93
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.lvm import trigger
+
+
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+ # the OSD id digit can also appear inside the UUID itself; parsing
+ # should still strip only the leading id
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
new file mode 100644
index 00000000..1fa22e5b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -0,0 +1,236 @@
+import os
+import pytest
+from copy import deepcopy
+from mock.mock import patch, call
+from ceph_volume import process
+from ceph_volume.api import lvm as api
+from ceph_volume.devices.lvm import zap
+
+
+class TestFindAssociatedDevices(object):
+
+ def test_no_lvs_found_that_match_id(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=10)
+
+ def test_no_lvs_found_that_match_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags=tags,
+ vg_name='vg', lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_fsid='aaaa-lkjh')
+
+ def test_no_lvs_found_that_match_id_fsid(self, monkeypatch, device_info):
+ tags = 'ceph.osd_id=9,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,'+\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='vg',
+ lv_tags=tags, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id='9', osd_fsid='aaaa-lkjh')
+
+ def test_no_ceph_lvs_found(self, monkeypatch):
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', lv_tags='',
+ lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kwargs: {})
+
+ with pytest.raises(RuntimeError):
+ zap.find_associated_devices(osd_id=100)
+
+ def test_lv_is_matched_id(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0')
+ assert result[0].abspath == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: deepcopy(volumes))
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_fsid='asdf-lkjh')
+ assert result[0].abspath == '/dev/VolGroup/lv'
+
+ def test_lv_is_matched_id_fsid(self, monkeypatch):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,' +\
+ 'ceph.type=data'
+ osd = api.Volume(lv_name='volume1', lv_uuid='y', vg_name='',
+ lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ monkeypatch.setattr(zap.api, 'get_lvs', lambda **kw: volumes)
+ monkeypatch.setattr(process, 'call', lambda x, **kw: ('', '', 0))
+
+ result = zap.find_associated_devices(osd_id='0', osd_fsid='asdf-lkjh')
+ assert result[0].abspath == '/dev/VolGroup/lv'
+
+
+class TestEnsureAssociatedLVs(object):
+
+ def test_nothing_is_found(self):
+ volumes = []
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == []
+
+ def test_data_is_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=data'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/data', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/data']
+
+ def test_block_is_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/block']
+
+ def test_success_message_for_fsid(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id=None, osd_fsid='asdf-lkjh')
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: asdf-lkjh" in err
+
+ def test_success_message_for_id(self, factory, is_root, capsys):
+ cli_zap = zap.Zap([])
+ args = factory(devices=[], osd_id='1', osd_fsid=None)
+ cli_zap.args = args
+ cli_zap.zap()
+ out, err = capsys.readouterr()
+ assert "Zapping successful for OSD: 1" in err
+
+ def test_block_and_partition_are_found(self, monkeypatch):
+ monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1')
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/sdb1' in result
+ assert '/dev/VolGroup/block' in result
+
+ def test_journal_is_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ osd = api.Volume(
+ lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv', lv_tags=tags)
+ volumes = []
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert result == ['/dev/VolGroup/lv']
+
+ def test_multiple_journals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_dbs_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=db'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_wals_are_found(self):
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=wal'
+ volumes = []
+ for i in range(3):
+ osd = api.Volume(
+ lv_name='volume%s' % i, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % i, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lv0' in result
+ assert '/dev/VolGroup/lv1' in result
+ assert '/dev/VolGroup/lv2' in result
+
+ def test_multiple_backing_devs_are_found(self):
+ volumes = []
+ for _type in ['journal', 'db', 'wal']:
+ tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.wal_uuid=x,ceph.type=%s' % _type
+ osd = api.Volume(
+ lv_name='volume%s' % _type, lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/lv%s' % _type, lv_tags=tags)
+ volumes.append(osd)
+ result = zap.ensure_associated_lvs(volumes)
+ assert '/dev/VolGroup/lvjournal' in result
+ assert '/dev/VolGroup/lvwal' in result
+ assert '/dev/VolGroup/lvdb' in result
+
+ @patch('ceph_volume.devices.lvm.zap.api.get_lvs')
+ def test_ensure_associated_lvs(self, m_get_lvs):
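+ # zapping by tags alone should look up LVs once per auxiliary device type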
+ zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
+ calls = [
+ call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
+ call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
+ ]
+ m_get_lvs.assert_has_calls(calls, any_order=True)
+
+
+class TestWipeFs(object):
+
+ def setup(self):
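+ # zero the retry interval so the wipefs retry loop does not sleep between tries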
+ os.environ['CEPH_VOLUME_WIPEFS_INTERVAL'] = '0'
+
+ def test_works_on_second_try(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 0)])
+ result = zap.wipefs('/dev/sda')
+ assert result is None
+
+ def test_does_not_work_after_several_tries(self, stub_call):
+ os.environ['CEPH_VOLUME_WIPEFS_TRIES'] = '2'
+ stub_call([('wiping /dev/sda', '', 1), ('', '', 1)])
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
+
+ def test_does_not_work_default_tries(self, stub_call):
+ stub_call([('wiping /dev/sda', '', 1)]*8)
+ with pytest.raises(RuntimeError):
+ zap.wipefs('/dev/sda')
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
new file mode 100644
index 00000000..e4cf8ce1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
@@ -0,0 +1,97 @@
+import pytest
+from ceph_volume.devices import raw
+from mock.mock import patch
+
+
+class TestRaw(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Manage a single-device OSD on a raw block device.' in stdout
+
+ def test_main_shows_activate_subcommands(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'activate ' in stdout
+ assert 'Discover and prepare' in stdout
+
+ def test_main_shows_prepare_subcommands(self, capsys):
+ raw.main.Raw([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'prepare ' in stdout
+ assert 'Format a raw device' in stdout
+
+
+class TestPrepare(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ raw.prepare.Prepare([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Prepare an OSD by assigning an ID and FSID' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ raw.prepare.Prepare(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'a raw device to use for the OSD' in stdout
+ assert 'Crush device class to assign this OSD to' in stdout
+ assert 'Use BlueStore backend' in stdout
+ assert 'Path to bluestore block.db block device' in stdout
+ assert 'Path to bluestore block.wal block device' in stdout
+ assert 'Enable device encryption via dm-crypt' in stdout
+
+ @patch('ceph_volume.util.arg_validators.ValidDevice.__call__')
+ def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys):
+ m_valid_device.return_value = '/dev/foo'
+ with pytest.raises(SystemExit):
+ raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo', '--dmcrypt']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set' in stderr
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open):
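+ # the dm-crypt mapper name is derived as ceph-<osd fsid>-<kname>-<device type>-dmcrypt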
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'block', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-block-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-block-dmcrypt'
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'db', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-db-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-db-dmcrypt'
+
+ @patch('ceph_volume.util.encryption.luks_open')
+ @patch('ceph_volume.util.encryption.luks_format')
+ @patch('ceph_volume.util.disk.lsblk')
+ def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open):
+ m_lsblk.return_value = {'KNAME': 'foo'}
+ m_luks_format.return_value = True
+ m_luks_open.return_value = True
+ result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'wal', '123')
+ m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-wal-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/foo')
+ assert result == '/dev/mapper/ceph-123-foo-wal-dmcrypt'
+
+ @patch('ceph_volume.devices.raw.prepare.rollback_osd')
+ @patch('ceph_volume.devices.raw.prepare.Prepare.prepare')
+ @patch('ceph_volume.util.arg_validators.ValidDevice.__call__')
+ def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd):
+ m_valid_device.return_value = '/dev/foo'
+ m_prepare.side_effect = Exception('foo')
+ m_rollback_osd.return_value = 'foobar'
+ with pytest.raises(Exception):
+ raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo']).main()
+ m_rollback_osd.assert_called()
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
new file mode 100644
index 00000000..ac2dd0e7
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
@@ -0,0 +1,200 @@
+import os
+import pytest
+from ceph_volume.devices.simple import activate
+
+
+class TestActivate(object):
+
+ def test_no_data_uuid(self, factory, tmpfile, is_root, monkeypatch, capture):
+ json_config = tmpfile(contents='{}')
+ args = factory(osd_id='0', osd_fsid='1234', json_config=json_config)
+ with pytest.raises(RuntimeError):
+ activate.Activate([]).activate(args)
+
+ def test_invalid_json_path(self):
+ os.environ['CEPH_VOLUME_SIMPLE_JSON_DIR'] = '/non/existing/path'
+ with pytest.raises(RuntimeError) as error:
+ activate.Activate(['1', 'asdf']).main()
+ assert 'Expected JSON config path not found' in str(error.value)
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ activate.Activate([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Activate OSDs by mounting devices previously configured' in stdout
+
+ def test_activate_all(self, is_root, monkeypatch):
+ '''
+ make sure Activate calls activate for each file returned by glob
+ '''
+ mocked_glob = []
+ def mock_glob(glob):
+ path = os.path.dirname(glob)
+ mocked_glob.extend(['{}/{}.json'.format(path, file_) for file_ in
+ ['1', '2', '3']])
+ return mocked_glob
+ activate_files = []
+ def mock_activate(self, args):
+ activate_files.append(args.json_config)
+ monkeypatch.setattr('glob.glob', mock_glob)
+ monkeypatch.setattr(activate.Activate, 'activate', mock_activate)
+ activate.Activate(['--all']).main()
+ assert activate_files == mocked_glob
+
+
+class TestEnableSystemdUnits(object):
+
+ def test_nothing_is_activated(self, tmpfile, is_root, capsys):
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ stdout, stderr = capsys.readouterr()
+ assert 'Skipping enabling of `simple`' in stderr
+ assert 'Skipping masking of ceph-disk' in stderr
+ assert 'Skipping enabling and starting OSD simple' in stderr
+
+ def test_no_systemd_flag_is_true(self, tmpfile, is_root):
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--no-systemd', '--file', json_config, '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ assert activation.skip_systemd is True
+
+ def test_no_systemd_flag_is_false(self, tmpfile, is_root):
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=True)
+ activation.activate = lambda x: True
+ activation.main()
+ assert activation.skip_systemd is False
+
+ def test_masks_ceph_disk(self, tmpfile, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+
+ def test_enables_simple_unit(self, tmpfile, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0', '1234', 'simple')
+
+ def test_enables_osd_unit(self, tmpfile, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', capture)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', lambda *a: True)
+
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0',)
+
+ def test_starts_osd_unit(self, tmpfile, is_root, monkeypatch, capture):
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.mask_ceph_disk', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_volume', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.enable_osd', lambda *a: True)
+ monkeypatch.setattr('ceph_volume.systemd.systemctl.start_osd', capture)
+
+ json_config = tmpfile(contents='{}')
+ activation = activate.Activate(['--file', json_config, '0', '1234'], from_trigger=False)
+ activation.activate = lambda x: True
+ activation.main()
+ activation.enable_systemd_units('0', '1234')
+ assert len(capture.calls) == 1
+ assert capture.calls[0]['args'] == ('0',)
+
+
+class TestValidateDevices(object):
+
+ def test_filestore_missing_journal(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'filestore', 'data': {}})
+ assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
+
+ def test_filestore_missing_data(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'filestore', 'journal': {}})
+ assert 'Unable to activate filestore OSD due to missing devices' in str(error.value)
+
+ def test_filestore_journal_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'type': 'filestore', 'journal': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['journal']" in stderr
+
+ def test_filestore_data_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'type': 'filestore', 'data': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['data']" in stderr
+
+ def test_filestore_with_all_devices(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'type': 'filestore', 'journal': {}, 'data': {}})
+ assert result is True
+
+ def test_filestore_without_type(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'journal': {}, 'data': {}})
+ assert result is True
+
+ def test_bluestore_with_all_devices(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'type': 'bluestore', 'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_without_type(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_is_default(self):
+ activation = activate.Activate([])
+ result = activation.validate_devices({'data': {}, 'block': {}})
+ assert result is True
+
+ def test_bluestore_data_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'data': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['data']" in stderr
+
+ def test_bluestore_missing_data(self):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError) as error:
+ activation.validate_devices({'type': 'bluestore', 'block': {}})
+ assert 'Unable to activate bluestore OSD due to missing devices' in str(error.value)
+
+ def test_bluestore_block_device_found(self, capsys):
+ activation = activate.Activate([])
+ with pytest.raises(RuntimeError):
+ activation.validate_devices({'block': {}})
+ stdout, stderr = capsys.readouterr()
+ assert "devices found: ['block']" in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
new file mode 100644
index 00000000..11849362
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_scan.py
@@ -0,0 +1,68 @@
+import os
+import pytest
+from ceph_volume.devices.simple import scan
+
+
+class TestGetContents(object):
+
+ def test_multiple_lines_are_left_as_is(self, tmpfile):
+ magic_file = tmpfile(contents='first\nsecond\n')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file) == 'first\nsecond\n'
+
+ def test_extra_whitespace_gets_removed(self, tmpfile):
+ magic_file = tmpfile(contents='first ')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file) == 'first'
+
+ def test_single_newline_values_are_trimmed(self, tmpfile):
+ magic_file = tmpfile(contents='first\n')
+ scanner = scan.Scan([])
+ assert scanner.get_contents(magic_file) == 'first'
+
+
+class TestEtcPath(object):
+
+ def test_directory_is_valid(self, tmpdir):
+ path = str(tmpdir)
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ assert scanner.etc_path == path
+
+ def test_directory_does_not_exist_gets_created(self, tmpdir):
+ path = os.path.join(str(tmpdir), 'subdir')
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ assert scanner.etc_path == path
+ assert os.path.isdir(path)
+
+ def test_complains_when_file(self, tmpfile):
+ path = tmpfile()
+ scanner = scan.Scan([])
+ scanner._etc_path = path
+ with pytest.raises(RuntimeError):
+ scanner.etc_path
+
+
+class TestParseKeyring(object):
+
+ def test_newlines_are_removed(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ assert '\n' not in scan.parse_keyring('\n'.join(contents))
+
+ def test_key_has_spaces_removed(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ result = scan.parse_keyring('\n'.join(contents))
+ assert result.startswith(' ') is False
+ assert result.endswith(' ') is False
+
+ def test_actual_key_is_extracted(self):
+ contents = [
+ '[client.osd-lockbox.8d7a8ab2-5db0-4f83-a785-2809aba403d5]',
+ '\tkey = AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA==', '']
+ result = scan.parse_keyring('\n'.join(contents))
+ assert result == 'AQDtoGha/GYJExAA7HNl7Ukhqr7AKlCpLJk6UA=='
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
new file mode 100644
index 00000000..d3220f2b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_trigger.py
@@ -0,0 +1,45 @@
+import pytest
+from ceph_volume import exceptions
+from ceph_volume.devices.simple import trigger
+
+
+class TestParseOSDid(object):
+
+ def test_no_id_found_if_no_digit(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('asdlj-ljahsdfaslkjhdfa')
+
+ def test_no_id_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_id('ljahsdfaslkjhdfa')
+
+ def test_id_found(self):
+ result = trigger.parse_osd_id('1-ljahsdfaslkjhdfa')
+ assert result == '1'
+
+
+class TestParseOSDUUID(object):
+
+ def test_uuid_is_parsed(self):
+ result = trigger.parse_osd_uuid('1-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_parsed_longer_sha1(self):
+ result = trigger.parse_osd_uuid('1-foo-bar-asdf-ljkh-asdf-ljkh-asdf')
+ assert result == 'foo-bar-asdf-ljkh-asdf-ljkh-asdf'
+
+ def test_uuid_is_not_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahsdfaslkjhdfa')
+
+ def test_uuid_is_not_found_missing_id(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ trigger.parse_osd_uuid('ljahs-dfa-slkjhdfa-foo')
+
+ def test_robust_double_id_in_uuid(self):
+ # the OSD id digit can also appear inside the UUID itself; parsing
+ # should still strip only the leading id
+ result = trigger.parse_osd_uuid("1-abc959fd-1ec9-4864-b141-3154f9b9f8ed")
+ assert result == 'abc959fd-1ec9-4864-b141-3154f9b9f8ed'
diff --git a/src/ceph-volume/ceph_volume/tests/devices/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
new file mode 100644
index 00000000..42c4940f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/devices/test_zap.py
@@ -0,0 +1,28 @@
+import pytest
+from ceph_volume.devices import lvm
+
+
+class TestZap(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ lvm.zap.Zap([]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Zaps the given logical volume(s), raw device(s) or partition(s)' in stdout
+
+ def test_main_shows_full_help(self, capsys):
+ with pytest.raises(SystemExit):
+ lvm.zap.Zap(argv=['--help']).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'optional arguments' in stdout
+
+ @pytest.mark.parametrize('device_name', [
+ '/dev/mapper/foo',
+ '/dev/dm-0',
+ ])
+ def test_can_not_zap_mapper_device(self, monkeypatch, device_info, capsys, is_root, device_name):
+ monkeypatch.setattr('os.path.exists', lambda x: True)
+ device_info()
+ with pytest.raises(SystemExit):
+ lvm.zap.Zap(argv=[device_name]).main()
+ stdout, stderr = capsys.readouterr()
+ assert 'Refusing to zap' in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/functional/.gitignore b/src/ceph-volume/ceph_volume/tests/functional/.gitignore
new file mode 100644
index 00000000..a2ee2e58
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/.gitignore
@@ -0,0 +1,5 @@
+*.vdi
+.vagrant/
+vagrant_ssh_config
+fetch/
+global_vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/README.md b/src/ceph-volume/ceph_volume/tests/functional/README.md
new file mode 100644
index 00000000..b9e892ac
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/README.md
@@ -0,0 +1,24 @@
+# ceph-volume functional test suite
+
+This test suite is based on vagrant and is normally run via Jenkins on GitHub
+PRs. With a functioning Vagrant installation these tests can also be run
+locally (tested with vagrant's libvirt provider).
+
+## Vagrant with libvirt
+By default the tests make assumptions about the network segments to use
+(public and cluster network), as well as the libvirt storage pool and URI. On
+an otherwise unused vagrant setup these defaults should be fine.
+If you prefer to configure the storage pool and libvirt URI
+explicitly, create a file
+`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/global_vagrant_variables.yml`
+with content as follows:
+``` yaml
+libvirt_uri: qemu:///system
+libvirt_storage_pool: 'vagrant-ceph-nvme'
+```
+Adjust the values as needed.
+
+After this, descend into a test directory (e.g.
+`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm`) and run `tox -vre
+centos7-bluestore-create -- --provider=libvirt` to execute the tests in
+`$ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/`.
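+
+As a minimal sketch (assuming vagrant and the vagrant-libvirt plugin are
+already installed), a full local run could look like:
+``` bash
+cd $ceph_repo/src/ceph-volume/ceph_volume/tests/functional/lvm
+tox -vre centos7-bluestore-create -- --provider=libvirt
+```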
diff --git a/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile
new file mode 100644
index 00000000..9341698e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/Vagrantfile
@@ -0,0 +1,429 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require 'yaml'
+require 'time'
+VAGRANTFILE_API_VERSION = '2'
+
+DEBUG = false
+
+global_settings = {}
+if File.symlink?(__FILE__)
+ global_config = File.expand_path(
+ File.join(
+ File.dirname(File.readlink(__FILE__)),
+ 'global_vagrant_variables.yml')
+ )
+ if File.exist?(global_config)
+ global_settings = YAML.load_file(global_config)
+ end
+end
+
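+# fall back to empty strings when no global_vagrant_variables.yml is present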
+LIBVIRT_URI = global_settings.fetch('libvirt_uri', '')
+LIBVIRT_STORAGE_POOL = global_settings.fetch('libvirt_storage_pool', '')
+
+config_file=File.expand_path(File.join(File.dirname(__FILE__), 'vagrant_variables.yml'))
+settings=YAML.load_file(config_file)
+
+LABEL_PREFIX = settings['label_prefix'] ? settings['label_prefix'] + "-" : ""
+NMONS = settings['mon_vms']
+NOSDS = settings['osd_vms']
+NMDSS = settings['mds_vms']
+NRGWS = settings['rgw_vms']
+NNFSS = settings['nfs_vms']
+RESTAPI = settings['restapi']
+NRBD_MIRRORS = settings['rbd_mirror_vms']
+CLIENTS = settings['client_vms']
+NISCSI_GWS = settings['iscsi_gw_vms']
+PUBLIC_SUBNET = settings['public_subnet']
+CLUSTER_SUBNET = settings['cluster_subnet']
+BOX = settings['vagrant_box']
+CLIENT_BOX = settings['client_vagrant_box']
+BOX_URL = settings['vagrant_box_url']
+SYNC_DIR = settings['vagrant_sync_dir']
+MEMORY = settings['memory']
+ETH = settings['eth']
+USER = settings['ssh_username']
+
+ASSIGN_STATIC_IP = settings.fetch('assign_static_ip', true)
+DISABLE_SYNCED_FOLDER = settings.fetch('vagrant_disable_synced_folder', false)
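+# unique suffix for libvirt disk file names so repeated runs do not collide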
+DISK_UUID = Time.now.utc.to_i
+
+def create_vmdk(name, size)
+ dir = Pathname.new(__FILE__).expand_path.dirname
+ path = File.join(dir, '.vagrant', name + '.vmdk')
+ `vmware-vdiskmanager -c -s #{size} -t 0 -a scsi #{path} \
+ 2>&1 > /dev/null` unless File.exist?(path)
+end
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+ config.ssh.insert_key = false # workaround for https://github.com/mitchellh/vagrant/issues/5048
+ config.ssh.private_key_path = settings['ssh_private_key_path']
+ config.ssh.username = USER
+
+ config.vm.provider :libvirt do |lv|
+ # When using libvirt, avoid errors like:
+ # "CPU feature cmt not found"
+ lv.cpu_mode = 'host-passthrough'
+ # set libvirt uri if present
+ if not LIBVIRT_URI.empty?
+ lv.uri = LIBVIRT_URI
+ end
+ # set libvirt storage pool if present
+ if not LIBVIRT_STORAGE_POOL.empty?
+ lv.storage_pool_name = LIBVIRT_STORAGE_POOL
+ end
+ end
+
+ # Faster bootup. Disables mounting the sync folder for libvirt and virtualbox
+ if DISABLE_SYNCED_FOLDER
+ config.vm.provider :virtualbox do |v,override|
+ override.vm.synced_folder '.', SYNC_DIR, disabled: true
+ end
+ config.vm.provider :libvirt do |v,override|
+ override.vm.synced_folder '.', SYNC_DIR, disabled: true
+ end
+ end
+
+ (0..CLIENTS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}client#{i}" do |client|
+ client.vm.box = CLIENT_BOX
+ client.vm.hostname = "#{LABEL_PREFIX}client#{i}"
+ if ASSIGN_STATIC_IP
+ client.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.4#{i}"
+ end
+ # Virtualbox
+ client.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ client.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ client.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ client.vm.provider "parallels" do |prl|
+ prl.name = "client#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ client.vm.provider :linode do |provider|
+ provider.label = client.vm.hostname
+ end
+ end
+ end
+
+ (0..NRGWS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}rgw#{i}" do |rgw|
+ rgw.vm.box = BOX
+ rgw.vm.box_url = BOX_URL
+ rgw.vm.hostname = "#{LABEL_PREFIX}rgw#{i}"
+ if ASSIGN_STATIC_IP
+ rgw.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.5#{i}"
+ end
+
+ # Virtualbox
+ rgw.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rgw.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rgw.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ rgw.vm.provider "parallels" do |prl|
+ prl.name = "rgw#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ rgw.vm.provider :linode do |provider|
+ provider.label = rgw.vm.hostname
+ end
+ end
+ end
+
+ (0..NNFSS - 1).each do |i|
+ config.vm.define "nfs#{i}" do |nfs|
+ nfs.vm.box = BOX
+ nfs.vm.box_url = BOX_URL
+ nfs.vm.hostname = "nfs#{i}"
+ if ASSIGN_STATIC_IP
+ nfs.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.6#{i}"
+ end
+
+ # Virtualbox
+ nfs.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ nfs.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ nfs.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ nfs.vm.provider "parallels" do |prl|
+ prl.name = "nfs#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ nfs.vm.provider :linode do |provider|
+ provider.label = nfs.vm.hostname
+ end
+ end
+ end
+
+ (0..NMDSS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mds#{i}" do |mds|
+ mds.vm.box = BOX
+ mds.vm.box_url = BOX_URL
+ mds.vm.hostname = "#{LABEL_PREFIX}mds#{i}"
+ if ASSIGN_STATIC_IP
+ mds.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.7#{i}"
+ end
+ # Virtualbox
+ mds.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mds.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mds.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+ # Parallels
+ mds.vm.provider "parallels" do |prl|
+ prl.name = "mds#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mds.vm.provider :linode do |provider|
+ provider.label = mds.vm.hostname
+ end
+ end
+ end
+
+ (0..NRBD_MIRRORS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}rbd_mirror#{i}" do |rbd_mirror|
+ rbd_mirror.vm.box = BOX
+ rbd_mirror.vm.box_url = BOX_URL
+ rbd_mirror.vm.hostname = "#{LABEL_PREFIX}rbd-mirror#{i}"
+ if ASSIGN_STATIC_IP
+ rbd_mirror.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.8#{i}"
+ end
+ # Virtualbox
+ rbd_mirror.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ rbd_mirror.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ rbd_mirror.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+ # Parallels
+ rbd_mirror.vm.provider "parallels" do |prl|
+ prl.name = "rbd-mirror#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ rbd_mirror.vm.provider :linode do |provider|
+ provider.label = rbd_mirror.vm.hostname
+ end
+ end
+ end
+
+ (0..NISCSI_GWS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}iscsi_gw#{i}" do |iscsi_gw|
+ iscsi_gw.vm.box = BOX
+ iscsi_gw.vm.box_url = BOX_URL
+ iscsi_gw.vm.hostname = "#{LABEL_PREFIX}iscsi-gw#{i}"
+ if ASSIGN_STATIC_IP
+ iscsi_gw.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.9#{i}"
+ end
+ # Virtualbox
+ iscsi_gw.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ iscsi_gw.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ iscsi_gw.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+ # Parallels
+ iscsi_gw.vm.provider "parallels" do |prl|
+ prl.name = "iscsi-gw#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ iscsi_gw.vm.provider :linode do |provider|
+ provider.label = iscsi_gw.vm.hostname
+ end
+ end
+ end
+
+ (0..NMONS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}mon#{i}" do |mon|
+ mon.vm.box = BOX
+ mon.vm.box_url = BOX_URL
+ mon.vm.hostname = "#{LABEL_PREFIX}mon#{i}"
+ if ASSIGN_STATIC_IP
+ mon.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.1#{i}"
+ end
+ # Virtualbox
+ mon.vm.provider :virtualbox do |vb|
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ mon.vm.provider :vmware_fusion do |v|
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ mon.vm.provider :libvirt do |lv|
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ mon.vm.provider "parallels" do |prl|
+ prl.name = "mon#{i}"
+ prl.memory = "#{MEMORY}"
+ end
+
+ mon.vm.provider :linode do |provider|
+ provider.label = mon.vm.hostname
+ end
+ end
+ end
+
+ (0..NOSDS - 1).each do |i|
+ config.vm.define "#{LABEL_PREFIX}osd#{i}" do |osd|
+ osd.vm.box = BOX
+ osd.vm.box_url = BOX_URL
+ osd.vm.hostname = "#{LABEL_PREFIX}osd#{i}"
+ if ASSIGN_STATIC_IP
+ osd.vm.network :private_network,
+ ip: "#{PUBLIC_SUBNET}.10#{i}"
+ osd.vm.network :private_network,
+ ip: "#{CLUSTER_SUBNET}.20#{i}"
+ end
+ # Virtualbox
+ osd.vm.provider :virtualbox do |vb|
+ # Create our own controller for consistency and to remove VM dependency
+ # but only do it once, otherwise it would fail when rebooting machines.
+ # We assume this has already run if the first disk was created.
+ unless File.exist?("disk-#{i}-0.vdi")
+ vb.customize ['storagectl', :id,
+ '--name', 'OSD Controller',
+ '--add', 'scsi']
+ end
+ (0..2).each do |d|
+ vb.customize ['createhd',
+ '--filename', "disk-#{i}-#{d}",
+ '--size', '12000'] unless File.exist?("disk-#{i}-#{d}.vdi")
+ vb.customize ['storageattach', :id,
+ '--storagectl', 'OSD Controller',
+ '--port', 3 + d,
+ '--device', 0,
+ '--type', 'hdd',
+ '--medium', "disk-#{i}-#{d}.vdi"]
+ end
+ vb.customize ['modifyvm', :id, '--memory', "#{MEMORY}"]
+ end
+
+ # VMware
+ osd.vm.provider :vmware_fusion do |v|
+ (0..1).each do |d|
+ v.vmx["scsi0:#{d + 1}.present"] = 'TRUE'
+ v.vmx["scsi0:#{d + 1}.fileName"] =
+ create_vmdk("disk-#{i}-#{d}", '11000MB')
+ end
+ v.vmx['memsize'] = "#{MEMORY}"
+ end
+
+ # Libvirt
+ driverletters = ('a'..'z').to_a
+ osd.vm.provider :libvirt do |lv|
+ # always make /dev/sd{a/b/c/d} so that CI can ensure that
+ # virtualbox and libvirt will have the same devices to use for OSDs
+ (0..3).each do |d|
+ lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => '12G', :bus => "ide"
+ end
+ lv.memory = MEMORY
+ lv.random_hostname = true
+ lv.nic_model_type = "e1000"
+ end
+
+ # Parallels
+ osd.vm.provider "parallels" do |prl|
+ prl.name = "osd#{i}"
+ prl.memory = "#{MEMORY}"
+ (0..1).each do |d|
+ prl.customize ["set", :id,
+ "--device-add",
+ "hdd",
+ "--iface",
+ "sata"]
+ end
+ end
+
+ osd.vm.provider :linode do |provider|
+ provider.label = osd.vm.hostname
+ end
+
+ end
+ end
+end
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
new file mode 100644
index 00000000..ae65f9b5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+dmcrypt: true
+num_osds: 2
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# ceph-ansible supported variables
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml
new file mode 120000
index 00000000..66d44c72
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all
new file mode 100644
index 00000000..ae65f9b5
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+dmcrypt: true
+num_osds: 2
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# ceph-ansible supported variables
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all
new file mode 100644
index 00000000..c2e356fe
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/group_vars/all
@@ -0,0 +1,34 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+num_osds: 2
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+# 9 GiB in bytes (9 * 1024^3)
+block_db_size: 9663676416
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
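block_db_size must be given in bytes, and the 9 GiB figure above works out to 9 * 1024^3 = 9663676416. A one-line check:

    # 9 GiB expressed in bytes, matching block_db_size above.
    assert 9 * 1024 ** 3 == 9663676416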
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml
new file mode 120000
index 00000000..66d44c72
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type-explicit/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all
new file mode 100644
index 00000000..c2e356fe
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/group_vars/all
@@ -0,0 +1,34 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+num_osds: 2
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+# 9 GiB in bytes (9 * 1024^3)
+block_db_size: 9663676416
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/mixed-type/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all
new file mode 100644
index 00000000..92ca5bce
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/group_vars/all
@@ -0,0 +1,30 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+dmcrypt: true
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all
new file mode 100644
index 00000000..f71c89ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/bluestore/single-type/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all
new file mode 100644
index 00000000..006e9b85
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+dmcrypt: true
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+num_osds: 2
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml
new file mode 120000
index 00000000..66d44c72
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all
new file mode 100644
index 00000000..006e9b85
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+dmcrypt: true
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+num_osds: 2
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all
new file mode 100644
index 00000000..0b287c58
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/group_vars/all
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+num_osds: 2
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml
new file mode 120000
index 00000000..66d44c72
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_explicit.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type-explicit/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all
new file mode 100644
index 00000000..0b287c58
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/group_vars/all
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+num_osds: 2
+devices:
+ - /dev/sdb
+ - /dev/sdc
+ - /dev/nvme0n1
+ - /dev/nvme1n1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml
new file mode 120000
index 00000000..8cf11d4e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_mixed_type.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/mixed-type/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all
new file mode 100644
index 00000000..719321cb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/group_vars/all
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+dmcrypt: true
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_journal_size: 2048
+
+# The following is only needed for testing purposes and is not part of
+# the variables supported by ceph-ansible.
+
+osd_ids:
+ - 0
+ - 1
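This variant additionally overrides osd_journal_size; Ceph reads that option in megabytes, so 2048 yields a 2 GiB journal per OSD. A quick conversion check:

    # osd_journal_size is in MiB; 2048 MiB equals 2 GiB.
    assert 2048 * 1024 ** 2 == 2 * 1024 ** 3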
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and Ubuntu precise, enp0s8 for CentOS and Ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes, have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on the vagrant box.
+# Set to /home/vagrant/sync for CentOS/7 and /home/{ user }/vagrant for OpenStack; it defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. The folder is not needed for testing, so this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all
new file mode 100644
index 00000000..8cf7a0c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not among the
+# variables supported by ceph-ansible
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos7/filestore/single-type/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml
new file mode 100644
index 00000000..5922ecf2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/noop.yml
@@ -0,0 +1,12 @@
+---
+
+# Allows a 'setup.yml' file to always be included in functional tests, while
+# executing real work only in the scenarios that actually need it
+
+- hosts: all
+ gather_facts: no
+
+ tasks:
+
+ - debug:
+ msg: "This is an empty setup playbook. The current scenario didn't need any work done"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml
new file mode 100644
index 00000000..5ed9fdef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/setup_mixed_type.yml
@@ -0,0 +1,188 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: install lvm2
+ package:
+ name: lvm2
+ state: present
+ - name: tell lvm to globally ignore loop devices
+ lineinfile:
+ path: /etc/lvm/lvm.conf
+ line: ' global_filter = [ "r|loop|", "a|.*|" ]'
+ insertafter: '^devices {'
+ - name: tell lvm to ignore loop devices
+ lineinfile:
+ path: /etc/lvm/lvm.conf
+ line: ' filter = [ "r|loop|", "a|.*|" ]'
+ insertafter: '^devices {'
+ - name: rescan pvs
+ command: "pvscan --cache"
+ - name: create the nvme image systemd unit
+ copy:
+ content: |
+ [Unit]
+ Description=NVMe loop device
+ After=local-fs.target
+ Wants=local-fs.target
+
+ [Service]
+ Type=simple
+ ExecStart=/bin/bash /opt/ceph-nvme.sh
+ StandardOutput=journal
+ StandardError=journal
+
+ [Install]
+ WantedBy=multi-user.target
+ dest: "/etc/systemd/system/ceph-nvme.service"
+
+ - name: create the ceph-nvme startup script
+ copy:
+ content: |
+ set -x
+ set -e
+ modprobe nvmet
+ modprobe nvme_loop
+ modprobe nvme_fabrics
+ modprobe loop
+ losetup -v /dev/loop0 /opt/loop0_nvme0
+ losetup -v /dev/loop1 /opt/loop1_nvme1
+ losetup -l
+ nvmetcli restore /opt/loop.json
+ nvme connect -t loop -n testnqn1 -q hostnqn
+ nvme connect -t loop -n testnqn2 -q hostnqn
+ nvme list
+ dest: "/opt/ceph-nvme.sh"
+
+ - name: ensure ceph-nvme is enabled
+ service:
+ name: ceph-nvme
+ state: stopped
+ enabled: yes
+
+ - name: install nvme dependencies
+ package:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - nvme-cli
+ - nvmetcli
+
+ - name: enable NVME kernel modules
+ modprobe:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - nvmet
+ - nvme_loop
+ - nvme_fabrics
+
+ - name: detach nvme files from loop devices
+ command: "losetup -d /dev/{{ item }}"
+ failed_when: false
+ loop:
+ - loop0
+ - loop1
+
+ - name: remove previous nvme files
+ file:
+ path: "{{ item }}"
+ state: absent
+ loop:
+ - /opt/loop0_nvme0
+ - /opt/loop1_nvme1
+
+  - name: create 11GB backing files for NVMe
+ command: "fallocate -l 11G {{ item }}"
+ loop:
+ - /opt/loop0_nvme0
+ - /opt/loop1_nvme1
+
+  - name: set up loop devices with the backing files
+ command: "losetup /dev/loop{{ item }} /opt/loop{{ item }}_nvme{{ item }}"
+ failed_when: false
+ loop:
+ - 0
+ - 1
+
+ - name: create the loop.json file for nvmetcli
+ copy:
+ content: |
+ {
+ "hosts": [
+ {
+ "nqn": "hostnqn"
+ }
+ ],
+ "ports": [
+ {
+ "addr": {
+ "adrfam": "",
+ "traddr": "",
+ "treq": "not specified",
+ "trsvcid": "",
+ "trtype": "loop"
+ },
+ "portid": 1,
+ "referrals": [],
+ "subsystems": [
+ "testnqn1",
+ "testnqn2"
+ ]
+ }
+ ],
+ "subsystems": [
+ {
+ "allowed_hosts": [
+ "hostnqn"
+ ],
+ "attr": {
+ "allow_any_host": "0"
+ },
+ "namespaces": [
+ {
+ "device": {
+ "nguid": "ef90689c-6c46-d44c-89c1-4067801309a8",
+ "path": "/dev/loop0"
+ },
+ "enable": 1,
+ "nsid": 1
+ }
+ ],
+ "nqn": "testnqn1"
+ },
+ {
+ "allowed_hosts": [
+ "hostnqn"
+ ],
+ "attr": {
+ "allow_any_host": "0"
+ },
+ "namespaces": [
+ {
+ "device": {
+ "nguid": "ef90689c-6c46-d44c-89c1-4067801309a7",
+ "path": "/dev/loop1"
+ },
+ "enable": 1,
+ "nsid": 2
+ }
+ ],
+ "nqn": "testnqn2"
+ }
+ ]
+ }
+ dest: "/opt/loop.json"
+
+ - name: setup the /dev/loop0 target with nvmetcli
+ command: nvmetcli restore /opt/loop.json
+
+ - name: connect the new target as an nvme device
+ command: "nvme connect -t loop -n testnqn{{ item }} -q hostnqn"
+ loop:
+ - 1
+ - 2
+
+ - name: debug output for nvme list
+ command: nvme list
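The systemd unit and startup script above recreate the loop-backed NVMe devices after a reboot; the tasks that follow build the same state for the current boot. A quick manual check on an OSD vm that the targets came up (a sketch; losetup and nvme-cli are installed by this playbook):

    losetup -l         # both /opt/loop*_nvme* files should be attached
    nvme list          # two loop-transport namespaces should be present
    nvme list-subsys   # should show testnqn1 and testnqn2 from /opt/loop.json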
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
new file mode 100644
index 00000000..7c6c3059
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
@@ -0,0 +1,72 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd daemons
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ with_items: "{{ osd_ids }}"
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items: "{{ osd_ids }}"
+ - name: purge osds
+ command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ with_items: "{{ osd_ids }}"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: rescan pvs
+ command: "pvscan --cache"
+ - name: rescan vgs
+ command: "vgscan"
+ - name: list lvs
+ command: "lvs"
+ - name: zap devices used for OSDs
+ command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ with_items: "{{ devices }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: batch create devices again
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: ensure batch create is idempotent
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ register: batch_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch idempotency
+ fail:
+ msg: "lvm batch failed idempotency check"
+ when:
+ - batch_cmd.rc != 0
+ - "'strategy changed' not in batch_cmd.stderr"
+
+ - name: run batch --report to see if devices get filtered
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ register: report_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch --report idempotency
+ fail:
+ msg: "lvm batch --report failed idempotency check"
+ when:
+ - report_cmd.rc != 0
+ - "'strategy changed' not in report_cmd.stderr"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
new file mode 100644
index 00000000..1ff0acc9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
@@ -0,0 +1,64 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd daemons
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ with_items: "{{ osd_ids }}"
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items: "{{ osd_ids }}"
+ - name: purge osds
+ command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ with_items: "{{ osd_ids }}"
+
+- hosts: osds
+ become: yes
+ vars:
+ external_devices: "{{ '--db-devices' if osd_objectstore == 'bluestore' else '--journal-devices' }}"
+ tasks:
+
+ - name: zap devices used for OSDs
+ command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ with_items: "{{ devices }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: batch create devices again
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: ensure batch create is idempotent when all data devices are filtered
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ register: batch_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch idempotency
+ fail:
+ msg: "lvm batch failed idempotency check"
+ when:
+ - batch_cmd.rc != 0
+
+ - name: run batch --report to see if devices get filtered
+ command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ register: report_cmd
+ failed_when: false
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: check batch --report idempotency
+ fail:
+ msg: "lvm batch --report failed idempotency check"
+ when:
+ - report_cmd.rc != 0
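Unlike the plain batch test, this variant splits the devices list: the first two entries become data devices and the remainder is passed to --db-devices (bluestore) or --journal-devices (filestore). For bluestore the templated command expands to something like the following sketch (device names are illustrative; each mixed-type scenario defines its own devices list):

    ceph-volume --cluster test lvm batch --yes --bluestore \
        /dev/sdb /dev/sdc --db-devices /dev/sdd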
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
new file mode 100644
index 00000000..9d63df9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
@@ -0,0 +1,34 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd daemons
+ service:
+ name: "ceph-osd@{{ item }}"
+ state: stopped
+ with_items: "{{ osd_ids }}"
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: mark osds down
+ command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ with_items: "{{ osd_ids }}"
+ - name: purge osds
+ command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ with_items: "{{ osd_ids }}"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: zap devices used for OSDs
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --osd-id {{ item }} --destroy"
+ with_items: "{{ osd_ids }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
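After zapping by OSD id with --destroy, the logical volumes backing those OSDs should be gone. A manual spot check on an OSD vm (a sketch, not part of the playbook):

    ceph-volume lvm list   # osd ids 0 and 1 should no longer be reported
    lvs                    # their backing LVs should be absent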
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
new file mode 100644
index 00000000..017853a2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -0,0 +1,78 @@
+[tox]
+envlist = {centos7,xenial}-{bluestore,filestore}-{single_type,single_type_dmcrypt},centos7-{bluestore,filestore}-{mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
+skipsdist = True
+
+[testenv]
+deps = mock
+whitelist_externals =
+ vagrant
+ bash
+ git
+ cp
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+ ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+ ANSIBLE_STDOUT_CALLBACK = debug
+ ANSIBLE_RETRY_FILES_ENABLED = False
+ ANSIBLE_SSH_RETRIES = 5
+ VAGRANT_CWD = {changedir}
+ CEPH_VOLUME_DEBUG = 1
+ DEBIAN_FRONTEND=noninteractive
+changedir=
+ centos7-filestore-single_type: {toxinidir}/centos7/filestore/single-type
+ centos7-filestore-single_type_dmcrypt: {toxinidir}/centos7/filestore/single-type-dmcrypt
+ centos7-filestore-mixed_type: {toxinidir}/centos7/filestore/mixed-type
+ centos7-filestore-mixed_type_dmcrypt: {toxinidir}/centos7/filestore/mixed-type-dmcrypt
+ centos7-filestore-mixed_type_explicit: {toxinidir}/centos7/filestore/mixed-type-explicit
+ centos7-filestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/filestore/mixed-type-dmcrypt-explicit
+ centos7-bluestore-single_type: {toxinidir}/centos7/bluestore/single-type
+ centos7-bluestore-single_type_dmcrypt: {toxinidir}/centos7/bluestore/single-type-dmcrypt
+ centos7-bluestore-mixed_type: {toxinidir}/centos7/bluestore/mixed-type
+ centos7-bluestore-mixed_type_dmcrypt: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt
+ centos7-bluestore-mixed_type_explicit: {toxinidir}/centos7/bluestore/mixed-type-explicit
+ centos7-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos7/bluestore/mixed-type-dmcrypt-explicit
+ xenial-filestore-single_type: {toxinidir}/xenial/filestore/single-type
+ xenial-filestore-single_type_dmcrypt: {toxinidir}/xenial/filestore/single-type-dmcrypt
+ xenial-bluestore-single_type: {toxinidir}/xenial/bluestore/single-type
+ xenial-bluestore-single_type_dmcrypt: {toxinidir}/xenial/bluestore/single-type-dmcrypt
+commands=
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+ # individual scenario setup
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
+
+ # use ceph-ansible to deploy a ceph cluster on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+ # prepare nodes for testing with testinfra
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # test cluster state using testinfra
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+    # reboot all vms (best effort)
+ bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+ # after a reboot, osds may take about 20 seconds to come back up
+ sleep 30
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+    # destroy an OSD, zap its device, and recreate it using its ID
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # retest to ensure cluster came back up correctly
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # test zap OSDs by ID
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml
+
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
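To run a single scenario locally instead of the whole matrix, pass one env name from the changedir map above; a sketch, assuming tox, vagrant, and the virtualbox provider are installed (passenv=* forwards the variables, and the branch values are illustrative):

    cd src/ceph-volume/ceph_volume/tests/functional/batch
    CEPH_ANSIBLE_BRANCH=master CEPH_DEV_BRANCH=master \
        tox -e centos7-bluestore-single_type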
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all
new file mode 100644
index 00000000..59151426
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/group_vars/all
@@ -0,0 +1,30 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not among the
+# variables supported by ceph-ansible
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7252344d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all
new file mode 100644
index 00000000..f71c89ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not among the
+# variables supported by ceph-ansible
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml
new file mode 100644
index 00000000..7252344d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/bluestore/single-type/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all
new file mode 100644
index 00000000..a4eafa10
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/group_vars/all
@@ -0,0 +1,32 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_journal_size: 2048
+
+# The following is only needed for testing purposes and is not among the
+# variables supported by ceph-ansible
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7252344d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type-dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all
new file mode 100644
index 00000000..8cf7a0c9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+devices:
+ - /dev/sdb
+ - /dev/sdc
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+
+# The following is only needed for testing purposes and is not among the
+# variables supported by ceph-ansible
+
+osd_ids:
+ - 0
+ - 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml
new file mode 120000
index 00000000..30874dfb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/noop.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml
new file mode 120000
index 00000000..aa867bcd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml
new file mode 120000
index 00000000..cb969fa1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/test_zap.yml
@@ -0,0 +1 @@
+../../../playbooks/test_zap.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml
new file mode 100644
index 00000000..7252344d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/xenial/filestore/single-type/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all
new file mode 100644
index 00000000..01ae1dae
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/sdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
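ceph-ansible turns each lvm_volumes entry into a ceph-volume call. The second entry above, which pairs a data LV with a db LV, corresponds roughly to the following sketch (not the exact command ceph-ansible issues):

    ceph-volume --cluster test lvm create --bluestore \
        --data test_group/data-lv2 --block.db journals/journal1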
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml
new file mode 120000
index 00000000..165d9da2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_bluestore.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all
new file mode 100644
index 00000000..9d4f50de
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all
@@ -0,0 +1,30 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/sdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml
new file mode 100644
index 00000000..bbd5b45d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml
@@ -0,0 +1,104 @@
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+  # partitions have been completely removed, so re-create them
+ - name: re-create partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: redeploy osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for the centos/7 box, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all
new file mode 100644
index 00000000..5af1b7ac
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/sdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/sdd1
+ journal: /dev/sdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml
new file mode 120000
index 00000000..1a8c37c1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_filestore.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all
new file mode 100644
index 00000000..7544678b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/sdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/sdd1
+ journal: /dev/sdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
new file mode 100644
index 00000000..91c9a1b8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml
@@ -0,0 +1,108 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/sdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: re-create partition /dev/sdd lvm journals
+ parted:
+ device: /dev/sdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: redeploy osd.2 using /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/sdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: re-create partition /dev/sdc1
+ parted:
+ device: /dev/sdc
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --filestore --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7d1a4449
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
new file mode 100644
index 00000000..37a48949
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
@@ -0,0 +1,27 @@
+---
+
+- hosts: osds
+ gather_facts: false
+ become: yes
+ tasks:
+
+ - name: partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: partition /dev/sdd lvm journals
+ parted:
+ device: /dev/sdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
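
For reference, the two parted tasks amount to a single CLI invocation (a
sketch; assumes parted's --script mode is available on the OSD box):

    parted --script /dev/sdd mklabel gpt \
        mkpart primary 0% 50% \
        mkpart primary 50% 100%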
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
new file mode 100644
index 00000000..1e9b8c3e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
@@ -0,0 +1,148 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+  # osd.0 lv (zap without --destroy, which would also remove the LV)
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: find all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ recurse: no
+ file_type: directory
+ register: osd_directories
+
+ - name: find all OSD symlinks
+ find:
+ paths: /var/lib/ceph/osd
+ recurse: yes
+ depth: 2
+ file_type: link
+ register: osd_symlinks
+
+ # set the OSD dir and the block/block.db links to root:root permissions, to
+ # ensure that the OSD will be able to activate regardless
+ - file:
+ path: "{{ item.path }}"
+ owner: root
+ group: root
+ with_items:
+ - "{{ osd_directories.files }}"
+
+ - file:
+ path: "{{ item.path }}"
+ owner: root
+ group: root
+ with_items:
+ - "{{ osd_symlinks.files }}"
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: sparse
+ register: tmpdir
+
+ - name: create a 5GB sparse file
+ command: fallocate -l 5G {{ tmpdir.path }}/sparse.file
+
+ - name: find an empty loop device
+ command: losetup -f
+ register: losetup_list
+
+ - name: setup loop device with sparse file
+ command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+ - name: create volume group
+ command: vgcreate test_zap {{ losetup_list.stdout }}
+ failed_when: false
+
+ - name: create logical volume 1
+ command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+ failed_when: false
+
+ - name: create logical volume 2
+ command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+ failed_when: false
+
+ # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+ - name: zap test_zap/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap test_zap/data-lv2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
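
The two zap --destroy tasks at the end encode the expectation that removing
the first LV leaves the test_zap VG (and its sibling LV) intact. A manual
spot-check between the two zaps could be (a sketch, run on the OSD node):

    vgs test_zap            # the VG should still be reported
    lvs test_zap/data-lv2   # the second LV should still exist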
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
new file mode 100644
index 00000000..4e43839e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
@@ -0,0 +1,169 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.2 journal
+ - name: zap /dev/sdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: re-create partition /dev/sdd lvm journals
+ parted:
+ device: /dev/sdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: redeploy osd.2 using /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 data lv
+ # note: we don't use --destroy here to test this works without that flag.
+ # --destroy is used in the bluestore tests
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+  # osd.0 journal device (zap without --destroy, so the partition is kept)
+ - name: zap /dev/sdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: find all OSD paths
+ find:
+ paths: /var/lib/ceph/osd
+ recurse: no
+ file_type: directory
+ register: osd_paths
+
+  # set all OSD paths to root:root to ensure that the OSD will be able to
+  # activate regardless
+ - name: mangle permissions to root
+ file:
+ path: "{{ item.path }}"
+ owner: root
+ group: root
+ recurse: yes
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@1 daemon
+ service:
+ name: ceph-osd@1
+ state: stopped
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --filestore --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: sparse
+ register: tmpdir
+
+ - name: create a 5GB sparse file
+ command: fallocate -l 5G {{ tmpdir.path }}/sparse.file
+
+ - name: find an empty loop device
+ command: losetup -f
+ register: losetup_list
+
+ - name: setup loop device with sparse file
+ command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+ - name: create volume group
+ command: vgcreate test_zap {{ losetup_list.stdout }}
+ failed_when: false
+
+ - name: create logical volume 1
+ command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+ failed_when: false
+
+ - name: create logical volume 2
+ command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+ failed_when: false
+
+ # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+ - name: zap test_zap/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap test_zap/data-lv2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
new file mode 100644
index 00000000..0b38c85b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -0,0 +1,79 @@
+[tox]
+envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt}
+skipsdist = True
+
+[testenv]
+deps = mock
+whitelist_externals =
+ vagrant
+ bash
+ git
+ cp
+ sleep
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+ ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+ ANSIBLE_STDOUT_CALLBACK = debug
+ ANSIBLE_RETRY_FILES_ENABLED = False
+ ANSIBLE_SSH_RETRIES = 5
+ VAGRANT_CWD = {changedir}
+ CEPH_VOLUME_DEBUG = 1
+ DEBIAN_FRONTEND=noninteractive
+changedir=
+ # plain/unencrypted
+ centos7-filestore-create: {toxinidir}/centos7/filestore/create
+ centos7-bluestore-create: {toxinidir}/centos7/bluestore/create
+ xenial-filestore-create: {toxinidir}/xenial/filestore/create
+ xenial-bluestore-create: {toxinidir}/xenial/bluestore/create
+ # dmcrypt
+ centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt
+ centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt
+ xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt
+ xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt
+ # TODO: these are placeholders for now, eventually we want to
+ # test the prepare/activate workflow of ceph-volume as well
+ xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate
+ xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+  centos7-filestore-prepare_activate: {toxinidir}/centos7/filestore/prepare_activate
+  centos7-bluestore-prepare_activate: {toxinidir}/centos7/bluestore/prepare_activate
+commands=
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+ # create logical volumes to test with on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml
+
+ # ad-hoc/local test setup for lvm
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml
+
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+ # use ceph-ansible to deploy a ceph cluster on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+ # prepare nodes for testing with testinfra
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # test cluster state using testinfra
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+  # reboot all vms - attempt a halt/up cycle, since vagrant reload can time out
+ bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+ # after a reboot, osds may take about 20 seconds to come back up
+ sleep 30
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+  # destroy an OSD, zap its device and recreate it using its ID
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # retest to ensure cluster came back up correctly
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
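
Each envlist entry maps to one scenario directory via changedir, so scenarios
can be run individually from this directory; the branch value below is only an
illustration of the CEPH_ANSIBLE_BRANCH override the config already supports:

    # run a single scenario
    tox -e centos7-bluestore-create
    # pin the ceph-ansible branch cloned by the env (defaults to master)
    CEPH_ANSIBLE_BRANCH=stable-3.2 tox -e xenial-filestore-dmcrypt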
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all
new file mode 100644
index 00000000..01ae1dae
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all
@@ -0,0 +1,29 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/sdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml
new file mode 120000
index 00000000..165d9da2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_bluestore.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml
new file mode 100644
index 00000000..7252344d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all
new file mode 100644
index 00000000..9d4f50de
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all
@@ -0,0 +1,30 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+lvm_volumes:
+ - data: data-lv1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ data_vg: test_group
+ db: journal1
+ db_vg: journals
+ - data: /dev/sdd1
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml
new file mode 100644
index 00000000..27290d93
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml
@@ -0,0 +1,104 @@
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: redeploy osd.2 using /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: redeploy osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: prepare osd.0 using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..7252344d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1,56 @@
+---
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all
new file mode 100644
index 00000000..5af1b7ac
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all
@@ -0,0 +1,32 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/sdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/sdd1
+ journal: /dev/sdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml
new file mode 120000
index 00000000..1a8c37c1
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml
@@ -0,0 +1 @@
+../../../playbooks/test_filestore.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml
new file mode 100644
index 00000000..82b330ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml
@@ -0,0 +1,54 @@
+---
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all
new file mode 100644
index 00000000..7544678b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+ - data: data-lv1
+ journal: /dev/sdc1
+ data_vg: test_group
+ crush_device_class: test
+ - data: data-lv2
+ journal: journal1
+ data_vg: test_group
+ journal_vg: journals
+ - data: /dev/sdd1
+ journal: /dev/sdd2
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml
new file mode 100644
index 00000000..91c9a1b8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml
@@ -0,0 +1,108 @@
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: stop ceph-osd@2 daemon
+ service:
+ name: ceph-osd@2
+ state: stopped
+
+ - name: stop ceph-osd@0 daemon
+ service:
+ name: ceph-osd@0
+ state: stopped
+
+
+- hosts: mons
+ become: yes
+ tasks:
+
+ - name: destroy osd.2
+ command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+ - name: destroy osd.0
+ command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ # osd.2 device
+ - name: zap /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/sdd2
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # partitions have been completely removed, so re-create them again
+ - name: re-create partition /dev/sdd for lvm data usage
+ parted:
+ device: /dev/sdd
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ label: gpt
+ state: present
+
+ - name: re-create partition /dev/sdd lvm journals
+ parted:
+ device: /dev/sdd
+ number: 2
+ part_start: 50%
+ part_end: 100%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: redeploy osd.2 using /dev/sdd1
+ command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ # osd.0 lv
+ - name: zap test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: zap /dev/sdc1
+ command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: re-create partition /dev/sdc1
+ parted:
+ device: /dev/sdc
+ number: 1
+ part_start: 0%
+ part_end: 50%
+ unit: '%'
+ state: present
+ label: gpt
+
+ - name: prepare osd.0 again using test_group/data-lv1
+ command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all to start the previously prepared osd.0
+ command: "ceph-volume lvm activate --filestore --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: node inventory
+ command: "ceph-volume inventory"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: list all OSDs
+ command: "ceph-volume lvm list"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..82b330ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1,54 @@
+---
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
diff --git a/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
new file mode 100644
index 00000000..0b65a172
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
@@ -0,0 +1,153 @@
+---
+# Defines deployment design and assigns role to server groups
+
+- hosts:
+ - mons
+ - osds
+ - mgrs
+
+ gather_facts: false
+ any_errors_fatal: true
+ become: true
+
+ tags:
+ - always
+
+ vars:
+ delegate_facts_host: True
+ dashboard_enabled: False
+
+ environment:
+ DEBIAN_FRONTEND: noninteractive
+
+ pre_tasks:
+    # If python2 can't be installed before any module runs, the play will fail,
+    # so just try whatever we can to get it installed
+ - name: check for python2
+ stat:
+ path: /usr/bin/python
+ ignore_errors: yes
+ register: systempython2
+
+ - name: install python2 for debian based systems
+ raw: sudo apt-get -y install python-simplejson
+ ignore_errors: yes
+ when:
+ - systempython2.stat is undefined or systempython2.stat.exists == false
+
+    # Ansible will try to auto-install python-apt; on some systems this might be
+    # python3-apt or python-apt, and it has caused whole runs to fail because
+    # it tries to do an interactive prompt
+ - name: install python-apt and aptitude in debian based systems
+ raw: sudo apt-get -y install "{{ item }}"
+ ignore_errors: yes
+ with_items:
+ - python3-apt
+ - python-apt
+ - aptitude
+
+ - name: install python2 for fedora
+ raw: sudo dnf -y install python creates=/usr/bin/python
+ ignore_errors: yes
+ when:
+ - systempython2.stat is undefined or systempython2.stat.exists == false
+
+ - name: install python2 for opensuse
+ raw: sudo zypper -n install python-base creates=/usr/bin/python2.7
+ ignore_errors: yes
+ when:
+ - systempython2.stat is undefined or systempython2.stat.exists == false
+
+ - name: gather facts
+ setup:
+ when:
+ - not delegate_facts_host | bool
+
+ - name: gather and delegate facts
+ setup:
+ delegate_to: "{{ item }}"
+ delegate_facts: True
+ with_items: "{{ groups['all'] }}"
+ run_once: true
+ when:
+ - delegate_facts_host | bool
+
+    - name: install required packages for fedora >= 23
+ raw: sudo dnf -y install python2-dnf libselinux-python ntp
+ when:
+ - ansible_distribution == 'Fedora'
+ - ansible_distribution_major_version|int >= 23
+
+ - name: check if it is atomic host
+ stat:
+ path: /run/ostree-booted
+ register: stat_ostree
+
+ - name: set_fact is_atomic
+ set_fact:
+ is_atomic: '{{ stat_ostree.stat.exists }}'
+
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-validate
+
+- hosts:
+ - mons
+ - osds
+ - mgrs
+ gather_facts: false
+ become: True
+ any_errors_fatal: true
+ vars:
+ dashboard_enabled: False
+ tasks:
+ - import_role:
+ name: ceph-defaults
+ - import_role:
+ name: ceph-facts
+ - import_role:
+ name: ceph-handler
+ - import_role:
+ name: ceph-common
+
+ - name: rsync ceph-volume to test nodes on centos
+ synchronize:
+ src: "{{ toxinidir }}/../../../../ceph_volume"
+ dest: "/usr/lib/python2.7/site-packages"
+ use_ssh_args: true
+ when:
+ - ansible_os_family == "RedHat"
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+ - name: rsync ceph-volume to test nodes on ubuntu
+ synchronize:
+ src: "{{ toxinidir }}/../../../../ceph_volume"
+ dest: "/usr/lib/python2.7/dist-packages"
+ use_ssh_args: true
+ when:
+ - ansible_os_family == "Debian"
+ - inventory_hostname in groups.get(osd_group_name, [])
+
+ - name: run ceph-config role
+ import_role:
+ name: ceph-config
+
+ - name: run ceph-mon role
+ import_role:
+ name: ceph-mon
+ when:
+ - inventory_hostname in groups.get(mon_group_name, [])
+
+ - name: run ceph-mgr role
+ import_role:
+ name: ceph-mgr
+ when:
+ - inventory_hostname in groups.get(mgr_group_name, [])
+
+ - name: run ceph-osd role
+ import_role:
+ name: ceph-osd
+ when:
+ - inventory_hostname in groups.get(osd_group_name, [])
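The playbook above is not meant to be invoked by hand; the tox environments further down copy it into a ceph-ansible checkout and drive it with extra vars. A rough sketch of that invocation, with illustrative paths and branch values:

    # mirrors the ansible-playbook call in the simple/tox.ini below
    ansible-playbook -vv -i hosts deploy.yml \
        --extra-vars "fetch_directory=$PWD/fetch \
                      ceph_dev_branch=master ceph_dev_sha1=latest \
                      toxinidir=$PWD"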
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh
new file mode 100644
index 00000000..43e64a65
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/generate_ssh_config.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# Generate a custom ssh config from Vagrant so that it can then be used by
+# ansible.cfg
+
+path=$1
+
+if [ $# -eq 0 ]
+ then
+ echo "A path to the scenario is required as an argument, but none was provided"
+ exit 1
+fi
+
+cd "$path"
+vagrant ssh-config > vagrant_ssh_config
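The single argument is the scenario directory holding the Vagrantfile; the generated vagrant_ssh_config is what ANSIBLE_SSH_ARGS points Ansible at in the tox environments below. For example (the scenario path here is illustrative):

    bash generate_ssh_config.sh ../simple/centos7/bluestore/activate
    # ansible then connects with: ANSIBLE_SSH_ARGS='-F <scenario>/vagrant_ssh_config'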
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py b/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py
new file mode 100644
index 00000000..16071944
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/output.py
@@ -0,0 +1,8 @@
+import os
+from ceph_volume import terminal
+
+# Read the (possibly non-ASCII) character exported by test_unicode.sh and
+# write it through ceph-volume's terminal helper; the functional test
+# expects the character to reach stderr without an encoding error.
+char = os.environ.get('INVALID')
+terminal.stdout(char)
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh
new file mode 100644
index 00000000..e4ba4f0a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/test_unicode.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Not entirely sure why these executables don't seem to be available in the
+# $PATH when running from tox. Calling out to `which` seems to fix it, at the
+# expense of making the script a bit obtuse
+
+mktemp=$(which mktemp)
+cat=$(which cat)
+grep=$(which grep)
+PYTHON_EXECUTABLE=$(which python)
+STDERR_FILE=$($mktemp)
+INVALID="→"
+
+echo "stderr file created: $STDERR_FILE"
+
+INVALID="$INVALID" $PYTHON_EXECUTABLE $1 2> ${STDERR_FILE}
+
+retVal=$?
+
+if [ $retVal -ne 0 ]; then
+ echo "Failed test: Unexpected failure from running Python script"
+ echo "Below is output of stderr captured:"
+ $cat "${STDERR_FILE}"
+ exit $retVal
+fi
+
+$grep --quiet "$INVALID" "${STDERR_FILE}"
+
+retVal=$?
+if [ $retVal -ne 0 ]; then
+ echo "Failed test: expected to find \"${INVALID}\" character in tmpfile: \"${STDERR_FILE}\""
+ echo "Below is output of stderr captured:"
+ $cat "${STDERR_FILE}"
+fi
+exit $retVal
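The script takes the path of a Python file to run (output.py above) and asserts that the non-ASCII character survives to stderr. A typical run, assuming the working directory is functional/scripts:

    bash test_unicode.sh output.py
    echo $?    # 0 when the "→" character was found in the captured stderr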
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh
new file mode 100644
index 00000000..3211b066
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_reload.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+# vagrant-libvirt commonly times out when "reloading" vms. Instead of calling
+# `vagrant reload`, halt everything and then start everything, which gives
+# this script the ability to retry the `vagrant up` in case of failure
+#
+
+vagrant halt
+# This should not really be needed, but in case of a possible race condition between halt
+# and up, it might improve things
+sleep 5
+
+
+retries=0
+until [ $retries -ge 5 ]
+do
+ echo "Attempting to start VMs. Attempts: $retries"
+ timeout 10m vagrant up "$@" && break
+ retries=$((retries+1))
+ sleep 5
+done
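Note that the retry loop exits 0 even when all five attempts time out, since the loop's final status comes from the last `sleep`. A sketch of a variant that propagates the failure (not what the script above does):

    retries=0
    until timeout 10m vagrant up "$@"; do
        retries=$((retries+1))
        if [ "$retries" -ge 5 ]; then
            echo "vagrant up failed after $retries attempts" >&2
            exit 1
        fi
        sleep 5
    done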
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
new file mode 100644
index 00000000..2f9a15f8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+retries=0
+until [ $retries -ge 5 ]
+do
+ echo "Attempting to start VMs. Attempts: $retries"
+ timeout 10m vagrant up "$@" && break
+ retries=$((retries+1))
+ sleep 5
+done
+
+sleep 10
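tox invokes this wrapper with pass-through flags, and the trailing sleep gives the VMs a moment to settle before ssh configs are generated. As invoked from a scenario directory (the flags shown are the tox defaults; the relative path assumes the layout below):

    VAGRANT_CWD=$PWD bash ../../../../scripts/vagrant_up.sh --no-provision --provider=virtualbox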
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
new file mode 100644
index 00000000..c265e783
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
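Two details worth noting in this file: the bare third-octet subnets must line up with the /24 networks defined in group_vars/all, and the file is only honored when vagrant runs with the scenario directory as its working directory. Bringing this scenario up by hand might look like:

    cd centos7/bluestore/activate
    VAGRANT_CWD=$PWD vagrant up --no-provision --provider=virtualbox
    vagrant ssh-config > vagrant_ssh_config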
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all
new file mode 100644
index 00000000..885c2c82
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
new file mode 100644
index 00000000..55ae7cc8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
@@ -0,0 +1,15 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: scan all running OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple scan"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
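Unlike the activate scenario above, this play lets ceph-volume discover the running OSDs itself and then activates everything found under /etc/ceph/osd in one call. The equivalent manual run on an OSD node:

    CEPH_VOLUME_DEBUG=1 ceph-volume --cluster=test simple scan
    CEPH_VOLUME_DEBUG=1 ceph-volume --cluster=test simple activate --all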
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all
new file mode 100644
index 00000000..30bcf5be
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all
new file mode 100644
index 00000000..7ab573b0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml
new file mode 100644
index 00000000..0745f257
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/test.yml
@@ -0,0 +1,29 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
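This variant scans each OSD directory explicitly but then activates with `--all`, so the osd_configs result registered above is not actually consumed. The net effect per node (the OSD directory name is illustrative):

    CEPH_VOLUME_DEBUG=1 ceph-volume --cluster=test simple scan /var/lib/ceph/osd/test-0
    CEPH_VOLUME_DEBUG=1 ceph-volume --cluster=test simple activate --all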
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/activate/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all
new file mode 100644
index 00000000..a27cfbad
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-luks/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all
new file mode 100644
index 00000000..edac61b2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml
new file mode 100644
index 00000000..63700c3c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/filestore/dmcrypt-plain/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: centos/7
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
new file mode 100644
index 00000000..e462d3b8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
@@ -0,0 +1,66 @@
+[tox]
+envlist = {centos7,xenial}-{filestore,bluestore}-{activate,dmcrypt_plain,dmcrypt_luks}
+skipsdist = True
+
+[testenv]
+deps = mock
+whitelist_externals =
+ vagrant
+ bash
+ git
+ sleep
+ cp
+passenv=*
+setenv=
+ ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config
+ ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions
+ ANSIBLE_STDOUT_CALLBACK = debug
+ ANSIBLE_RETRY_FILES_ENABLED = False
+ ANSIBLE_SSH_RETRIES = 5
+ VAGRANT_CWD = {changedir}
+ CEPH_VOLUME_DEBUG = 1
+ DEBIAN_FRONTEND=noninteractive
+changedir=
+ centos7-filestore-activate: {toxinidir}/centos7/filestore/activate
+ centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
+ xenial-filestore-activate: {toxinidir}/xenial/filestore/activate
+ xenial-bluestore-activate: {toxinidir}/xenial/bluestore/activate
+ xenial-bluestore-dmcrypt_plain: {toxinidir}/xenial/bluestore/dmcrypt-plain
+ xenial-bluestore-dmcrypt_luks: {toxinidir}/xenial/bluestore/dmcrypt-luks
+ xenial-filestore-dmcrypt_plain: {toxinidir}/xenial/filestore/dmcrypt-plain
+ xenial-filestore-dmcrypt_luks: {toxinidir}/xenial/filestore/dmcrypt-luks
+ centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
+ centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
+ centos7-filestore-dmcrypt_plain: {toxinidir}/centos7/filestore/dmcrypt-plain
+ centos7-filestore-dmcrypt_luks: {toxinidir}/centos7/filestore/dmcrypt-luks
+commands=
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
+ pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
+
+ bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+ bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
+
+ cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
+
+ # use ceph-ansible to deploy a ceph cluster on the vms
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
+
+ # prepare nodes for testing with testinfra
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+
+ # test cluster state with testinfra
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
+ ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
+
+ # reboot all vms
+ bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
+
+ # wait 2 minutes for services to be ready
+ sleep 120
+
+ # retest to ensure cluster came back up correctly after rebooting
+ py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
+
+ vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
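A single scenario can be run locally by name from functional/simple; the env vars read by the commands above are optional overrides, and extra posargs replace the default virtualbox provider:

    CEPH_ANSIBLE_BRANCH=master CEPH_DEV_BRANCH=master CEPH_DEV_SHA1=latest \
        tox -e centos7-bluestore-activate -- --provider=libvirt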
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all
new file mode 100644
index 00000000..c265e783
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml
new file mode 100644
index 00000000..b4aa759a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/activate/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all
new file mode 100644
index 00000000..885c2c82
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
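+  # `simple scan` persists each OSD's metadata as a JSON file under
+  # /etc/ceph/osd/; the two tasks below pick those files up and activate
+  # the OSDs from them.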
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml
new file mode 100644
index 00000000..b4aa759a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-luks/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS 7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disable synced folder creation. Synced folders are not needed for testing; this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all
new file mode 100644
index 00000000..30bcf5be
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "bluestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml
new file mode 100644
index 00000000..b4aa759a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/bluestore/dmcrypt-plain/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS 7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disable synced folder creation. Synced folders are not needed for testing; this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all
new file mode 100644
index 00000000..7ab573b0
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/group_vars/all
@@ -0,0 +1,19 @@
+---
+
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml
new file mode 100644
index 00000000..55ae7cc8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/test.yml
@@ -0,0 +1,15 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: scan all running OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple scan"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+
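+  # with no path argument, `simple scan` covers every running OSD, and
+  # `activate --all` below brings each one up from its scanned metadata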
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --all"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml
new file mode 100644
index 00000000..b4aa759a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/activate/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS 7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disable synced folder creation. Synced folders are not needed for testing; this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all
new file mode 100644
index 00000000..a27cfbad
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml
new file mode 100644
index 00000000..b4aa759a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-luks/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS 7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disable synced folder creation. Synced folders are not needed for testing; this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile
new file mode 120000
index 00000000..16076e42
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/Vagrantfile
@@ -0,0 +1 @@
+../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all
new file mode 100644
index 00000000..edac61b2
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/group_vars/all
@@ -0,0 +1,22 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.1.0/24"
+cluster_network: "192.168.2.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+ global:
+ osd_pool_default_pg_num: 8
+ osd_pool_default_size: 1
+ osd:
+ osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml
new file mode 100644
index 00000000..2e1c7ee9
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd0.yml
@@ -0,0 +1,7 @@
+---
+
+devices:
+ - '/dev/sdb'
+dedicated_devices:
+ - '/dev/sdc'
+osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml
new file mode 100644
index 00000000..7e90071c
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/host_vars/osd1.yml
@@ -0,0 +1,6 @@
+---
+
+devices:
+ - '/dev/sdb'
+ - '/dev/sdc'
+osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts
new file mode 100644
index 00000000..e0c08b94
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/hosts
@@ -0,0 +1,9 @@
+[mons]
+mon0 monitor_interface=eth1
+
+[osds]
+osd0
+osd1
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml
new file mode 100644
index 00000000..24e2c035
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/test.yml
@@ -0,0 +1,31 @@
+---
+
+- hosts: osds
+ become: yes
+ tasks:
+
+ - name: list all OSD directories
+ find:
+ paths: /var/lib/ceph/osd
+ file_type: directory
+ register: osd_paths
+
+ - name: scan all OSD directories
+ command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_paths.files }}"
+
+ - name: list all OSD JSON files
+ find:
+ paths: /etc/ceph/osd
+ file_type: file
+ register: osd_configs
+
+ - name: activate all scanned OSDs
+ command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
+ environment:
+ CEPH_VOLUME_DEBUG: 1
+ with_items:
+ - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml
new file mode 100644
index 00000000..b4aa759a
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/simple/xenial/filestore/dmcrypt-plain/vagrant_variables.yml
@@ -0,0 +1,73 @@
+---
+
+# DEPLOY CONTAINERIZED DAEMONS
+docker: false
+
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 2
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+
+# INSTALL SOURCE OF CEPH
+# valid values are 'stable' and 'dev'
+ceph_install_source: stable
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.1
+cluster_subnet: 192.168.2
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# Disks
+# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
+# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
+disks: "[ '/dev/sdb', '/dev/sdc' ]"
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial, bento/ubuntu-16.04, ubuntu/trusty64, or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful: the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for CentOS 7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disable synced folder creation. Synced folders are not needed for testing; this
+# skips mounting the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+
+os_tuning_params:
+ - { name: kernel.pid_max, value: 4194303 }
+ - { name: fs.file-max, value: 26234859 }
+
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
new file mode 100644
index 00000000..17cc996e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/conftest.py
@@ -0,0 +1,103 @@
+import pytest
+import os
+
+
+@pytest.fixture()
+def node(host, request):
+ """ This fixture represents a single node in the ceph cluster. Using the
+ host.ansible fixture provided by testinfra it can access all the ansible
+ variables provided to it by the specific test scenario being ran.
+
+ You must include this fixture on any tests that operate on specific type
+ of node because it contains the logic to manage which tests a node
+ should run.
+ """
+ ansible_vars = host.ansible.get_variables()
+    # tox/jenkins/users pass this in as an environment variable. We read it
+    # this way because testinfra does not collect and provide ansible config
+    # passed in via --extra-vars
+ ceph_dev_branch = os.environ.get("CEPH_DEV_BRANCH", "master")
+ group_names = ansible_vars["group_names"]
+ num_osd_ports = 4
+ if 'mimic' in ceph_dev_branch or 'luminous' in ceph_dev_branch:
+ num_osd_ports = 2
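+    # nautilus and later bind both msgr v1 and v2 addresses, which doubles
+    # the number of listening ports per OSD compared to luminous/mimic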
+
+ # capture the initial/default state
+ test_is_applicable = False
+ for marker in request.node.iter_markers():
+ if marker.name in group_names or marker.name == 'all':
+ test_is_applicable = True
+ break
+    # Check if any markers on the test method exist in the node's group_names.
+ # If they do not, this test is not valid for the node being tested.
+ if not test_is_applicable:
+ reason = "%s: Not a valid test for node type: %s" % (
+ request.function, group_names)
+ pytest.skip(reason)
+
+ osd_ids = []
+ osds = []
+ cluster_address = ""
+ # I can assume eth1 because I know all the vagrant
+ # boxes we test with use that interface
+ address = host.interface("eth1").addresses[0]
+ subnet = ".".join(ansible_vars["public_network"].split(".")[0:-1])
+ num_mons = len(ansible_vars["groups"]["mons"])
+ num_osds = len(ansible_vars.get("devices", []))
+ if not num_osds:
+ num_osds = len(ansible_vars.get("lvm_volumes", []))
+ osds_per_device = ansible_vars.get("osds_per_device", 1)
+ num_osds = num_osds * osds_per_device
+
+    # If the number of devices doesn't map to the number of OSDs, allow tests
+    # to define a custom number, defaulting to the device-derived count above
+ num_osds = ansible_vars.get('num_osds', num_osds)
+ cluster_name = ansible_vars.get("cluster", "ceph")
+ conf_path = "/etc/ceph/{}.conf".format(cluster_name)
+ if "osds" in group_names:
+ # I can assume eth2 because I know all the vagrant
+ # boxes we test with use that interface. OSDs are the only
+ # nodes that have this interface.
+ cluster_address = host.interface("eth2").addresses[0]
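+        # OSD data dirs are named {cluster}-{osd_id}; the sed below keeps
+        # only the numeric id portion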
+ cmd = host.run('sudo ls /var/lib/ceph/osd/ | sed "s/.*-//"')
+ if cmd.rc == 0:
+ osd_ids = cmd.stdout.rstrip("\n").split("\n")
+ osds = osd_ids
+
+ data = dict(
+ address=address,
+ subnet=subnet,
+ vars=ansible_vars,
+ osd_ids=osd_ids,
+ num_mons=num_mons,
+ num_osds=num_osds,
+ num_osd_ports=num_osd_ports,
+ cluster_name=cluster_name,
+ conf_path=conf_path,
+ cluster_address=cluster_address,
+ osds=osds,
+ )
+ return data
+
+
+def pytest_collection_modifyitems(session, config, items):
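+    # auto-mark tests by the path they live in so the ``node`` fixture can
+    # skip tests that do not apply to the current node's group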
+ for item in items:
+ test_path = item.location[0]
+ if "mon" in test_path:
+ item.add_marker(pytest.mark.mons)
+ elif "osd" in test_path:
+ item.add_marker(pytest.mark.osds)
+ elif "mds" in test_path:
+ item.add_marker(pytest.mark.mdss)
+ elif "mgr" in test_path:
+ item.add_marker(pytest.mark.mgrs)
+ elif "rbd-mirror" in test_path:
+ item.add_marker(pytest.mark.rbdmirrors)
+ elif "rgw" in test_path:
+ item.add_marker(pytest.mark.rgws)
+ elif "nfs" in test_path:
+ item.add_marker(pytest.mark.nfss)
+ elif "iscsi" in test_path:
+ item.add_marker(pytest.mark.iscsigws)
+ else:
+ item.add_marker(pytest.mark.all)
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/__init__.py
diff --git a/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
new file mode 100644
index 00000000..6d12babd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/tests/osd/test_osds.py
@@ -0,0 +1,60 @@
+import json
+
+
+class TestOSDs(object):
+
+ def test_ceph_osd_package_is_installed(self, node, host):
+ assert host.package("ceph-osd").is_installed
+
+ def test_osds_listen_on_public_network(self, node, host):
+        # TODO: figure out a way to parameterize this test
+ nb_port = (node["num_osds"] * node["num_osd_ports"])
+ assert host.check_output(
+ "netstat -lntp | grep ceph-osd | grep %s | wc -l" % (node["address"])) == str(nb_port) # noqa E501
+
+ def test_osds_listen_on_cluster_network(self, node, host):
+        # TODO: figure out a way to parameterize this test
+ nb_port = (node["num_osds"] * node["num_osd_ports"])
+ assert host.check_output("netstat -lntp | grep ceph-osd | grep %s | wc -l" % # noqa E501
+ (node["cluster_address"])) == str(nb_port)
+
+ def test_osd_services_are_running(self, node, host):
+        # TODO: figure out a way to parameterize node['osds'] for this test
+ for osd in node["osds"]:
+ assert host.service("ceph-osd@%s" % osd).is_running
+
+ def test_osd_are_mounted(self, node, host):
+        # TODO: figure out a way to parameterize node['osd_ids'] for this test
+ for osd_id in node["osd_ids"]:
+ osd_path = "/var/lib/ceph/osd/{cluster}-{osd_id}".format(
+ cluster=node["cluster_name"],
+ osd_id=osd_id,
+ )
+ assert host.mount_point(osd_path).exists
+
+ def test_ceph_volume_is_installed(self, node, host):
+        assert host.exists('ceph-volume')
+
+ def test_ceph_volume_systemd_is_installed(self, node, host):
+        assert host.exists('ceph-volume-systemd')
+
+ def _get_osd_id_from_host(self, node, osd_tree):
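+        # ``ceph osd tree -f json`` returns a flat ``nodes`` list; a host
+        # entry lists the ids of its OSDs under ``children``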
+ children = []
+ for n in osd_tree['nodes']:
+ if n['name'] == node['vars']['inventory_hostname'] and n['type'] == 'host': # noqa E501
+ children = n['children']
+ return children
+
+ def _get_nb_up_osds_from_ids(self, node, osd_tree):
+ nb_up = 0
+ ids = self._get_osd_id_from_host(node, osd_tree)
+ for n in osd_tree['nodes']:
+ if n['id'] in ids and n['status'] == 'up':
+ nb_up += 1
+ return nb_up
+
+ def test_all_osds_are_up_and_in(self, node, host):
+ cmd = "sudo ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format( # noqa E501
+ cluster=node["cluster_name"])
+ output = json.loads(host.check_output(cmd))
+ assert node["num_osds"] == self._get_nb_up_osds_from_ids(node, output)
diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_main.py b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
new file mode 100644
index 00000000..0af52e8d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
@@ -0,0 +1,51 @@
+import pytest
+from ceph_volume import exceptions, conf
+from ceph_volume.systemd.main import parse_subcommand, main, process
+
+
+class TestParseSubcommand(object):
+
+ def test_no_subcommand_found(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ parse_subcommand('')
+
+ def test_sub_command_is_found(self):
+ result = parse_subcommand('lvm-1-sha-1-something-0')
+ assert result == 'lvm'
+
+
+class Capture(object):
+
+ def __init__(self, *a, **kw):
+ self.a = a
+ self.kw = kw
+ self.calls = []
+
+ def __call__(self, *a, **kw):
+ self.calls.append(a)
+ self.calls.append(kw)
+
+
+class TestMain(object):
+
+ def setup(self):
+ conf.log_path = '/tmp/'
+
+ def test_no_arguments_parsing_error(self):
+ with pytest.raises(RuntimeError):
+ main(args=[])
+
+ def test_parsing_suffix_error(self):
+ with pytest.raises(exceptions.SuffixParsingError):
+ main(args=['asdf'])
+
+ def test_correct_command(self, monkeypatch):
+ run = Capture()
+ monkeypatch.setattr(process, 'run', run)
+        main(args=['ceph-volume-systemd', 'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0'])
+ command = run.calls[0][0]
+ assert command == [
+ 'ceph-volume',
+ 'lvm', 'trigger',
+ '8715BEB4-15C5-49DE-BA6F-401086EC7B41-0'
+ ]
diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py b/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py
new file mode 100644
index 00000000..8eec4a3d
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/systemd/test_systemctl.py
@@ -0,0 +1,21 @@
+import pytest
+from ceph_volume.systemd import systemctl
+
+class TestSystemctl(object):
+
+ @pytest.mark.parametrize("stdout,expected", [
+ (['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service'], ['1','2']),
+ (['Id=ceph-osd1.service',], []),
+ (['Id=ceph-osd@1'], ['1']),
+ ([], []),
+ ])
+ def test_get_running_osd_ids(self, stub_call, stdout, expected):
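+        # ids are expected to be parsed out of ``Id=ceph-osd@<id>[.service]``
+        # lines; entries without the ``@`` marker contribute nothing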
+ stub_call((stdout, [], 0))
+ osd_ids = systemctl.get_running_osd_ids()
+ assert osd_ids == expected
+
+ def test_returns_empty_list_on_nonzero_return_code(self, stub_call):
+ stdout = ['Id=ceph-osd@1.service', '', 'Id=ceph-osd@2.service']
+ stub_call((stdout, [], 1))
+ osd_ids = systemctl.get_running_osd_ids()
+ assert osd_ids == []
diff --git a/src/ceph-volume/ceph_volume/tests/test_configuration.py b/src/ceph-volume/ceph_volume/tests/test_configuration.py
new file mode 100644
index 00000000..9af6cd9b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_configuration.py
@@ -0,0 +1,117 @@
+import os
+try:
+ from cStringIO import StringIO
+except ImportError: # pragma: no cover
+ from io import StringIO # pragma: no cover
+from textwrap import dedent
+import pytest
+from ceph_volume import configuration, exceptions
+
+tabbed_conf = """
+[global]
+ default = 0
+ other_h = 1 # comment
+ other_c = 1 ; comment
+ colon = ;
+ hash = #
+"""
+
+
+class TestConf(object):
+
+ def setup(self):
+ self.conf_file = StringIO(dedent("""
+ [foo]
+ default = 0
+ """))
+
+ def test_get_non_existing_list(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ cfg.read_conf(self.conf_file)
+ assert cfg.get_list('global', 'key') == []
+
+ def test_get_non_existing_list_get_default(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ cfg.read_conf(self.conf_file)
+ assert cfg.get_list('global', 'key', ['a']) == ['a']
+
+ def test_get_rid_of_comments(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ conf_file = StringIO(dedent("""
+ [foo]
+ default = 0 # this is a comment
+ """))
+
+ cfg.read_conf(conf_file)
+ assert cfg.get_list('foo', 'default') == ['0']
+
+ def test_gets_split_on_commas(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ conf_file = StringIO(dedent("""
+ [foo]
+ default = 0,1,2,3 # this is a comment
+ """))
+
+ cfg.read_conf(conf_file)
+ assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3']
+
+ def test_spaces_and_tabs_are_ignored(self):
+ cfg = configuration.Conf()
+ cfg.is_valid = lambda: True
+ conf_file = StringIO(dedent("""
+ [foo]
+ default = 0, 1, 2 ,3 # this is a comment
+ """))
+
+ cfg.read_conf(conf_file)
+ assert cfg.get_list('foo', 'default') == ['0', '1', '2', '3']
+
+
+class TestLoad(object):
+
+ def test_load_from_path(self, tmpdir):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', 'default') == '0'
+
+ def test_load_with_colon_comments(self, tmpdir):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', 'other_c') == '1'
+
+ def test_load_with_hash_comments(self, tmpdir):
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', 'other_h') == '1'
+
+ def test_path_does_not_exist(self):
+ with pytest.raises(exceptions.ConfigurationError):
+ conf = configuration.load('/path/does/not/exist/ceph.con')
+ conf.is_valid()
+
+ def test_unable_to_read_configuration(self, tmpdir, capsys):
+ ceph_conf = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(ceph_conf, 'w') as config:
+ config.write(']broken] config\n[[')
+ with pytest.raises(RuntimeError):
+ configuration.load(ceph_conf)
+ stdout, stderr = capsys.readouterr()
+ assert 'File contains no section headers' in stderr
+
+    @pytest.mark.parametrize('commented', ['colon', 'hash'])
+    def test_comment_as_a_value(self, tmpdir, commented):
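+        # a value that consists solely of a comment delimiter should parse
+        # back as an empty string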
+ conf_path = os.path.join(str(tmpdir), 'ceph.conf')
+ with open(conf_path, 'w') as conf:
+ conf.write(tabbed_conf)
+ result = configuration.load(conf_path)
+ assert result.get('global', commented) == ''
diff --git a/src/ceph-volume/ceph_volume/tests/test_decorators.py b/src/ceph-volume/ceph_volume/tests/test_decorators.py
new file mode 100644
index 00000000..8df89145
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_decorators.py
@@ -0,0 +1,71 @@
+import os
+import pytest
+from ceph_volume import exceptions, decorators, terminal
+
+
+class TestNeedsRoot(object):
+
+ def test_is_root(self, monkeypatch):
+ def func():
+ return True
+ monkeypatch.setattr(decorators.os, 'getuid', lambda: 0)
+ assert decorators.needs_root(func)() is True
+
+ def test_is_not_root(self, monkeypatch):
+ def func():
+ return True # pragma: no cover
+ monkeypatch.setattr(decorators.os, 'getuid', lambda: 20)
+ with pytest.raises(exceptions.SuperUserError) as error:
+ decorators.needs_root(func)()
+
+ msg = 'This command needs to be executed with sudo or as root'
+ assert str(error.value) == msg
+
+
+class TestExceptionMessage(object):
+
+ def test_has_str_method(self):
+ result = decorators.make_exception_message(RuntimeError('an error'))
+ expected = "%s %s\n" % (terminal.red_arrow, 'RuntimeError: an error')
+ assert result == expected
+
+ def test_has_no_str_method(self):
+ class Error(Exception):
+ pass
+ result = decorators.make_exception_message(Error())
+ expected = "%s %s\n" % (terminal.red_arrow, 'Error')
+ assert result == expected
+
+
+class TestCatches(object):
+
+ def teardown(self):
+ try:
+ del(os.environ['CEPH_VOLUME_DEBUG'])
+ except KeyError:
+ pass
+
+ def test_ceph_volume_debug_enabled(self):
+ os.environ['CEPH_VOLUME_DEBUG'] = '1'
+ @decorators.catches() # noqa
+ def func():
+ raise RuntimeError()
+ with pytest.raises(RuntimeError):
+ func()
+
+ def test_ceph_volume_debug_disabled_no_exit(self, capsys):
+ @decorators.catches(exit=False)
+ def func():
+ raise RuntimeError()
+ func()
+ stdout, stderr = capsys.readouterr()
+ assert 'RuntimeError\n' in stderr
+
+ def test_ceph_volume_debug_exits(self, capsys):
+ @decorators.catches()
+ def func():
+ raise RuntimeError()
+ with pytest.raises(SystemExit):
+ func()
+ stdout, stderr = capsys.readouterr()
+ assert 'RuntimeError\n' in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/test_inventory.py b/src/ceph-volume/ceph_volume/tests/test_inventory.py
new file mode 100644
index 00000000..9721fccd
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_inventory.py
@@ -0,0 +1,110 @@
+# -*- coding: utf-8 -*-
+
+import pytest
+from ceph_volume.util.device import Devices
+
+
+@pytest.fixture
+def device_report_keys(device_info):
+ device_info(devices={
+ # example output of disk.get_devices()
+ '/dev/sdb': {'human_readable_size': '1.82 TB',
+ 'locked': 0,
+ 'model': 'PERC H700',
+ 'nr_requests': '128',
+ 'partitions': {},
+ 'path': '/dev/sdb',
+ 'removable': '0',
+ 'rev': '2.10',
+ 'ro': '0',
+ 'rotational': '1',
+ 'sas_address': '',
+ 'sas_device_handle': '',
+ 'scheduler_mode': 'cfq',
+ 'sectors': 0,
+ 'sectorsize': '512',
+ 'size': 1999844147200.0,
+ 'support_discard': '',
+ 'vendor': 'DELL',
+ 'device_id': 'Vendor-Model-Serial'}
+ }
+ )
+ report = Devices().json_report()[0]
+ return list(report.keys())
+
+@pytest.fixture
+def device_sys_api_keys(device_info):
+ device_info(devices={
+ # example output of disk.get_devices()
+ '/dev/sdb': {'human_readable_size': '1.82 TB',
+ 'locked': 0,
+ 'model': 'PERC H700',
+ 'nr_requests': '128',
+ 'partitions': {},
+ 'path': '/dev/sdb',
+ 'removable': '0',
+ 'rev': '2.10',
+ 'ro': '0',
+ 'rotational': '1',
+ 'sas_address': '',
+ 'sas_device_handle': '',
+ 'scheduler_mode': 'cfq',
+ 'sectors': 0,
+ 'sectorsize': '512',
+ 'size': 1999844147200.0,
+ 'support_discard': '',
+ 'vendor': 'DELL'}
+ }
+ )
+ report = Devices().json_report()[0]
+ return list(report['sys_api'].keys())
+
+
+class TestInventory(object):
+
+ expected_keys = [
+ 'path',
+ 'rejected_reasons',
+ 'sys_api',
+ 'available',
+ 'lvs',
+ 'device_id',
+ ]
+
+ expected_sys_api_keys = [
+ 'human_readable_size',
+ 'locked',
+ 'model',
+ 'nr_requests',
+ 'partitions',
+ 'path',
+ 'removable',
+ 'rev',
+ 'ro',
+ 'rotational',
+ 'sas_address',
+ 'sas_device_handle',
+ 'scheduler_mode',
+ 'sectors',
+ 'sectorsize',
+ 'size',
+ 'support_discard',
+ 'vendor',
+ ]
+
+ def test_json_inventory_keys_unexpected(self, device_report_keys):
+ for k in device_report_keys:
+ assert k in self.expected_keys, "unexpected key {} in report".format(k)
+
+ def test_json_inventory_keys_missing(self, device_report_keys):
+ for k in self.expected_keys:
+ assert k in device_report_keys, "expected key {} in report".format(k)
+
+ def test_sys_api_keys_unexpected(self, device_sys_api_keys):
+ for k in device_sys_api_keys:
+ assert k in self.expected_sys_api_keys, "unexpected key {} in sys_api field".format(k)
+
+ def test_sys_api_keys_missing(self, device_sys_api_keys):
+ for k in self.expected_sys_api_keys:
+ assert k in device_sys_api_keys, "expected key {} in sys_api field".format(k)
+
diff --git a/src/ceph-volume/ceph_volume/tests/test_main.py b/src/ceph-volume/ceph_volume/tests/test_main.py
new file mode 100644
index 00000000..afe9a234
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_main.py
@@ -0,0 +1,69 @@
+import os
+import pytest
+from ceph_volume import main
+
+
+class TestVolume(object):
+
+ def test_main_spits_help_with_no_arguments(self, capsys):
+ with pytest.raises(SystemExit):
+ main.Volume(argv=[])
+ stdout, stderr = capsys.readouterr()
+ assert 'Log Path' in stdout
+
+ def test_warn_about_using_help_for_full_options(self, capsys):
+ with pytest.raises(SystemExit):
+ main.Volume(argv=[])
+ stdout, stderr = capsys.readouterr()
+ assert 'See "ceph-volume --help" for full list' in stdout
+
+ def test_environ_vars_show_up(self, capsys):
+ os.environ['CEPH_CONF'] = '/opt/ceph.conf'
+ with pytest.raises(SystemExit):
+ main.Volume(argv=[])
+ stdout, stderr = capsys.readouterr()
+ assert 'CEPH_CONF' in stdout
+ assert '/opt/ceph.conf' in stdout
+
+ def test_flags_are_parsed_with_help(self, capsys):
+ with pytest.raises(SystemExit):
+ main.Volume(argv=['ceph-volume', '--help'])
+ stdout, stderr = capsys.readouterr()
+ assert '--cluster' in stdout
+ assert '--log-path' in stdout
+
+ def test_log_ignoring_missing_ceph_conf(self, caplog):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure we aren't causing an actual error
+ assert error.value.code == 0
+ log = caplog.records[-1]
+ assert log.message == 'ignoring inability to load ceph.conf'
+ assert log.levelname == 'ERROR'
+
+ def test_logs_current_command(self, caplog):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure we aren't causing an actual error
+ assert error.value.code == 0
+ log = caplog.records[-2]
+ assert log.message == 'Running command: ceph-volume --cluster barnacle lvm --help'
+ assert log.levelname == 'INFO'
+
+ def test_logs_set_level_error(self, caplog):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--log-level', 'error', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure we aren't causing an actual error
+ assert error.value.code == 0
+ assert caplog.records
+ # only log levels of 'ERROR' or above should be captured
+ for log in caplog.records:
+ assert log.levelname in ['ERROR', 'CRITICAL']
+
+ def test_logs_incorrect_log_level(self, capsys):
+ with pytest.raises(SystemExit) as error:
+ main.Volume(argv=['ceph-volume', '--log-level', 'foo', '--cluster', 'barnacle', 'lvm', '--help'])
+ # make sure this is an error
+ assert error.value.code != 0
+ stdout, stderr = capsys.readouterr()
+ assert "invalid choice" in stderr
diff --git a/src/ceph-volume/ceph_volume/tests/test_process.py b/src/ceph-volume/ceph_volume/tests/test_process.py
new file mode 100644
index 00000000..46e5c40e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_process.py
@@ -0,0 +1,92 @@
+import pytest
+import logging
+from ceph_volume.tests.conftest import Factory
+from ceph_volume import process
+
+
+@pytest.fixture
+def mock_call(monkeypatch):
+ """
+ Monkeypatches process.call, so that a caller can add behavior to the response
+ """
+ def apply(stdout=None, stderr=None, returncode=0):
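+        # build a minimal stand-in for a Popen object; only the attributes
+        # process.call touches (stdout/stderr with read(), wait, and
+        # communicate) are faked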
+ stdout_stream = Factory(read=lambda: stdout)
+ stderr_stream = Factory(read=lambda: stderr)
+ return_value = Factory(
+ stdout=stdout_stream,
+ stderr=stderr_stream,
+ wait=lambda: returncode,
+ communicate=lambda x: (stdout, stderr, returncode)
+ )
+
+ monkeypatch.setattr(
+ 'ceph_volume.process.subprocess.Popen',
+ lambda *a, **kw: return_value)
+
+ return apply
+
+
+class TestCall(object):
+
+ def test_stderr_terminal_and_logfile(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='some stderr message\n')
+ process.call(['ls'], terminal_verbose=True)
+ out, err = capsys.readouterr()
+ log_lines = [line[-1] for line in caplog.record_tuples]
+ assert 'Running command: ' in log_lines[0]
+ assert 'ls' in log_lines[0]
+ assert 'stderr some stderr message' in log_lines[-1]
+ assert 'some stderr message' in err
+
+ def test_stderr_terminal_and_logfile_off(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='some stderr message\n')
+ process.call(['ls'], terminal_verbose=False)
+ out, err = capsys.readouterr()
+ log_lines = [line[-1] for line in caplog.record_tuples]
+ assert 'Running command: ' in log_lines[0]
+ assert 'ls' in log_lines[0]
+ assert 'stderr some stderr message' in log_lines[-1]
+ assert out == ''
+
+ def test_verbose_on_failure(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1)
+ process.call(['ls'], terminal_verbose=False, logfile_verbose=False)
+ out, err = capsys.readouterr()
+ log_lines = '\n'.join([line[-1] for line in caplog.record_tuples])
+ assert 'Running command: ' in log_lines
+ assert 'ls' in log_lines
+ assert 'stderr' in log_lines
+ assert 'stdout: stdout' in err
+ assert out == ''
+
+ def test_silent_verbose_on_failure(self, mock_call, caplog, capsys):
+ caplog.set_level(logging.INFO)
+ mock_call(stdout='stdout\n', stderr='stderr\n', returncode=1)
+ process.call(['ls'], verbose_on_failure=False)
+ out, err = capsys.readouterr()
+ log_lines = '\n'.join([line[-1] for line in caplog.record_tuples])
+ assert 'Running command: ' in log_lines
+ assert 'ls' in log_lines
+ assert 'stderr' in log_lines
+ assert out == ''
+
+
+class TestFunctionalCall(object):
+
+ def test_stdin(self):
+ process.call(['xargs', 'ls'], stdin="echo '/'")
+
+ def test_unicode_encoding(self):
+ process.call(['echo', u'\xd0'])
+
+ def test_unicode_encoding_stdin(self):
+ process.call(['echo'], stdin=u'\xd0'.encode('utf-8'))
+
+
+class TestFunctionalRun(object):
+
+ def test_log_descriptors(self):
+ process.run(['ls', '-l'])
diff --git a/src/ceph-volume/ceph_volume/tests/test_terminal.py b/src/ceph-volume/ceph_volume/tests/test_terminal.py
new file mode 100644
index 00000000..fdf21907
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/test_terminal.py
@@ -0,0 +1,143 @@
+# -*- mode:python; tab-width:4; indent-tabs-mode:nil; coding:utf-8 -*-
+
+import codecs
+import io
+try:
+ from io import StringIO
+except ImportError:
+ from StringIO import StringIO
+import pytest
+import sys
+from ceph_volume import terminal
+from ceph_volume.log import setup_console
+
+
+class SubCommand(object):
+
+ help = "this is the subcommand help"
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ pass
+
+
+class BadSubCommand(object):
+
+ def __init__(self, argv):
+ self.argv = argv
+
+ def main(self):
+ raise SystemExit(100)
+
+
+class TestSubhelp(object):
+
+ def test_no_sub_command_help(self):
+ assert terminal.subhelp({}) == ''
+
+ def test_single_level_help(self):
+ result = terminal.subhelp({'sub': SubCommand})
+
+ assert 'this is the subcommand help' in result
+
+ def test_has_title_header(self):
+ result = terminal.subhelp({'sub': SubCommand})
+ assert 'Available subcommands:' in result
+
+ def test_command_with_no_help(self):
+ class SubCommandNoHelp(object):
+ pass
+ result = terminal.subhelp({'sub': SubCommandNoHelp})
+ assert result == ''
+
+
+class TestDispatch(object):
+
+ def test_no_subcommand_found(self):
+ result = terminal.dispatch({'sub': SubCommand}, argv=[])
+ assert result is None
+
+ def test_no_main_found(self):
+ class NoMain(object):
+
+ def __init__(self, argv):
+ pass
+ result = terminal.dispatch({'sub': NoMain}, argv=['sub'])
+ assert result is None
+
+ def test_subcommand_found_and_dispatched(self):
+ with pytest.raises(SystemExit) as error:
+ terminal.dispatch({'sub': SubCommand}, argv=['sub'])
+ assert str(error.value) == '0'
+
+ def test_subcommand_found_and_dispatched_with_errors(self):
+ with pytest.raises(SystemExit) as error:
+ terminal.dispatch({'sub': BadSubCommand}, argv=['sub'])
+ assert str(error.value) == '100'
+
+
+@pytest.fixture
+def stream():
+ def make_stream(buffer, encoding):
+        # mock a stdout-like stream with the given encoding
+ if sys.version_info >= (3, 0):
+ stderr = sys.stderr
+ stream = io.TextIOWrapper(buffer,
+ encoding=encoding,
+ errors=stderr.errors,
+ newline=stderr.newlines,
+ line_buffering=stderr.line_buffering)
+ else:
+ stream = codecs.getwriter(encoding)(buffer)
+            # StreamWriter does not have an encoding attached to it; it will
+            # ask the inner buffer for its "encoding" attribute in that case
+ stream.encoding = encoding
+ return stream
+ return make_stream
+
+
+class TestWriteUnicode(object):
+
+ def setup(self):
+        self.octopus_and_squid_en = u'octopus and squid'
+        self.octopus_and_squid_zh = u'章鱼和鱿鱼'
+        self.message = self.octopus_and_squid_en + self.octopus_and_squid_zh
+ setup_console()
+
+ def test_stdout_writer(self, capsys):
+ # should work with whatever stdout is
+ terminal.stdout(self.message)
+ _, err = capsys.readouterr()
+        assert self.octopus_and_squid_en in err
+        assert self.octopus_and_squid_zh in err
+
+ @pytest.mark.parametrize('encoding', ['ascii', 'utf8'])
+ def test_writer_log(self, stream, encoding, monkeypatch, caplog):
+ writer = StringIO()
+ terminal._Write(_writer=writer).raw(self.message)
+ writer.flush()
+ writer.seek(0)
+ output = writer.readlines()[0]
+        assert self.octopus_and_squid_en in output
+
+ @pytest.mark.parametrize('encoding', ['utf8'])
+ def test_writer(self, encoding, stream, monkeypatch, capsys, caplog):
+ buffer = io.BytesIO()
+ writer = stream(buffer, encoding)
+ terminal._Write(_writer=writer).raw(self.message)
+ writer.flush()
+ writer.seek(0)
+ val = buffer.getvalue()
+        assert self.octopus_and_squid_en.encode(encoding) in val
+
+ def test_writer_uses_log_on_unicodeerror(self, stream, monkeypatch, capture):
+
+ if sys.version_info > (3,):
+ pytest.skip("Something breaks inside of pytest's capsys")
+ monkeypatch.setattr(terminal.terminal_logger, 'info', capture)
+ buffer = io.BytesIO()
+ writer = stream(buffer, 'ascii')
+ terminal._Write(_writer=writer).raw(self.message)
+        assert self.octopus_and_squid_en in capture.calls[0]['args'][0]
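test_writer_uses_log_on_unicodeerror documents the fallback the other tests
rely on: when the attached stream cannot encode a message, _Write is expected
to route it through terminal_logger instead of raising. A rough sketch of
that pattern, not the actual implementation:

    import logging

    logger = logging.getLogger('ceph_volume.terminal')

    class Write(object):
        # minimal stand-in for the raw() fallback behavior exercised above
        def __init__(self, writer):
            self.writer = writer

        def raw(self, string):
            try:
                self.writer.write(string)
                self.writer.flush()
            except (UnicodeDecodeError, UnicodeEncodeError):
                # an ascii-only stream cannot take the CJK half of the
                # message, so it is logged instead of written
                logger.info(string)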
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
new file mode 100644
index 00000000..d4565ef4
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
@@ -0,0 +1,89 @@
+import argparse
+import pytest
+import os
+from ceph_volume import exceptions
+from ceph_volume.util import arg_validators
+
+
+class TestOSDPath(object):
+
+ def setup(self):
+ self.validator = arg_validators.OSDPath()
+
+ def test_is_not_root(self, monkeypatch):
+ monkeypatch.setattr(os, 'getuid', lambda: 100)
+ with pytest.raises(exceptions.SuperUserError):
+ self.validator('')
+
+ def test_path_is_not_a_directory(self, is_root, tmpfile, monkeypatch):
+ monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
+ validator = arg_validators.OSDPath()
+ with pytest.raises(argparse.ArgumentError):
+ validator(tmpfile())
+
+ def test_files_are_missing(self, is_root, tmpdir, monkeypatch):
+ tmppath = str(tmpdir)
+ monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
+ validator = arg_validators.OSDPath()
+ with pytest.raises(argparse.ArgumentError) as error:
+ validator(tmppath)
+ assert 'Required file (ceph_fsid) was not found in OSD' in str(error.value)
+
+
+class TestExcludeGroupOptions(object):
+
+ def setup(self):
+ self.parser = argparse.ArgumentParser()
+
+ def test_flags_in_one_group(self):
+ argv = ['<prog>', '--filestore', '--bar']
+ filestore_group = self.parser.add_argument_group('filestore')
+ bluestore_group = self.parser.add_argument_group('bluestore')
+ filestore_group.add_argument('--filestore')
+ bluestore_group.add_argument('--bluestore')
+ result = arg_validators.exclude_group_options(
+ self.parser,
+ ['filestore', 'bluestore'],
+ argv=argv
+ )
+ assert result is None
+
+ def test_flags_in_no_group(self):
+ argv = ['<prog>', '--foo', '--bar']
+ filestore_group = self.parser.add_argument_group('filestore')
+ bluestore_group = self.parser.add_argument_group('bluestore')
+ filestore_group.add_argument('--filestore')
+ bluestore_group.add_argument('--bluestore')
+ result = arg_validators.exclude_group_options(
+ self.parser,
+ ['filestore', 'bluestore'],
+ argv=argv
+ )
+ assert result is None
+
+ def test_flags_conflict(self, capsys):
+ argv = ['<prog>', '--filestore', '--bluestore']
+ filestore_group = self.parser.add_argument_group('filestore')
+ bluestore_group = self.parser.add_argument_group('bluestore')
+ filestore_group.add_argument('--filestore')
+ bluestore_group.add_argument('--bluestore')
+
+ arg_validators.exclude_group_options(
+ self.parser, ['filestore', 'bluestore'], argv=argv
+ )
+ stdout, stderr = capsys.readouterr()
+ assert 'Cannot use --filestore (filestore) with --bluestore (bluestore)' in stderr
+
+
+class TestValidDevice(object):
+
+ def setup(self):
+ self.validator = arg_validators.ValidDevice()
+
+ def test_path_is_valid(self, fake_call):
+ result = self.validator('/')
+ assert result.abspath == '/'
+
+ def test_path_is_invalid(self, fake_call):
+ with pytest.raises(argparse.ArgumentError):
+ self.validator('/device/does/not/exist')
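These validators follow the argparse convention of raising
argparse.ArgumentError from a callable, so they can presumably be wired in
directly as an argument's type. A sketch under that assumption; the --data
flag is invented here for illustration:

    import argparse
    from ceph_volume.util import arg_validators

    parser = argparse.ArgumentParser()
    # the validator is called with the raw string and returns a Device-like
    # object (note the .abspath attribute asserted above)
    parser.add_argument('--data', type=arg_validators.ValidDevice())
    args = parser.parse_args(['--data', '/dev/sda'])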
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_device.py b/src/ceph-volume/ceph_volume/tests/util/test_device.py
new file mode 100644
index 00000000..7dd8982f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_device.py
@@ -0,0 +1,577 @@
+import pytest
+from copy import deepcopy
+from ceph_volume.util import device
+from ceph_volume.api import lvm as api
+
+
+class TestDevice(object):
+
+ def test_sys_api(self, monkeypatch, device_info):
+ volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg',
+ lv_tags={}, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(volume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs:
+ deepcopy(volumes))
+
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.sys_api
+ assert "foo" in disk.sys_api
+
+ def test_lvm_size(self, monkeypatch, device_info):
+ volume = api.Volume(lv_name='lv', lv_uuid='y', vg_name='vg',
+ lv_tags={}, lv_path='/dev/VolGroup/lv')
+ volumes = []
+ volumes.append(volume)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs:
+ deepcopy(volumes))
+
+ # 5GB in size
+ data = {"/dev/sda": {"size": "5368709120"}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.lvm_size.gb == 4
+
+ def test_lvm_size_rounds_down(self, device_info):
+ # 5.5GB in size
+ data = {"/dev/sda": {"size": "5905580032"}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.lvm_size.gb == 4
+
+ def test_is_lv(self, device_info):
+ data = {"lv_path": "vg/lv", "vg_name": "vg", "name": "lv"}
+ lsblk = {"TYPE": "lvm"}
+ device_info(lv=data,lsblk=lsblk)
+ disk = device.Device("vg/lv")
+ assert disk.is_lv
+
+ def test_vgs_is_empty(self, device_info, monkeypatch):
+ BarPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
+ pv_tags={})
+ pvolumes = []
+ pvolumes.append(BarPVolume)
+ lsblk = {"TYPE": "disk"}
+ device_info(lsblk=lsblk)
+ monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: {})
+
+ disk = device.Device("/dev/nvme0n1")
+ assert disk.vgs == []
+
+ def test_vgs_is_not_empty(self, device_info, monkeypatch):
+ vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+ vg_extent_size=1073741824)
+ monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+ lsblk = {"TYPE": "disk"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/nvme0n1")
+ assert len(disk.vgs) == 1
+
+ def test_device_is_device(self, device_info):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "device"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_device is True
+
+ def test_device_is_rotational(self, device_info):
+ data = {"/dev/sda": {"rotational": "1"}}
+ lsblk = {"TYPE": "device"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.rotational
+
+ def test_device_is_not_rotational(self, device_info):
+ data = {"/dev/sda": {"rotational": "0"}}
+ lsblk = {"TYPE": "device"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.rotational
+
+ def test_device_is_rotational_lsblk(self, device_info):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "device", "ROTA": "1"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.rotational
+
+ def test_device_is_not_rotational_lsblk(self, device_info):
+ data = {"/dev/sda": {"rotational": "0"}}
+ lsblk = {"TYPE": "device", "ROTA": "0"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.rotational
+
+ def test_device_is_rotational_defaults_true(self, device_info):
+        # rotational defaults to True when neither sys_api nor lsblk provides the info
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "device", "foo": "bar"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.rotational
+
+ def test_disk_is_device(self, device_info):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "disk"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_device is True
+
+ def test_is_partition(self, device_info):
+ data = {"/dev/sda1": {"foo": "bar"}}
+ lsblk = {"TYPE": "part"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda1")
+ assert disk.is_partition
+
+ def test_is_not_acceptable_device(self, device_info):
+ data = {"/dev/dm-0": {"foo": "bar"}}
+ lsblk = {"TYPE": "mpath"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/dm-0")
+ assert not disk.is_device
+
+    def test_is_not_lvm_member(self, device_info):
+ data = {"/dev/sda1": {"foo": "bar"}}
+ lsblk = {"TYPE": "part"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda1")
+ assert not disk.is_lvm_member
+
+    def test_is_lvm_member(self, device_info):
+ data = {"/dev/sda1": {"foo": "bar"}}
+ lsblk = {"TYPE": "part"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda1")
+ assert not disk.is_lvm_member
+
+ def test_is_mapper_device(self, device_info):
+ lsblk = {"TYPE": "lvm"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/mapper/foo")
+ assert disk.is_mapper
+
+ def test_dm_is_mapper_device(self, device_info):
+ lsblk = {"TYPE": "lvm"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/dm-4")
+ assert disk.is_mapper
+
+ def test_is_not_mapper_device(self, device_info):
+ lsblk = {"TYPE": "disk"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.is_mapper
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_ceph_disk_lsblk(self, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_ceph_disk_blkid(self, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_ceph_disk_member_not_available_lsblk(self, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+ assert not disk.available
+ assert "Used by ceph-disk" in disk.rejected_reasons
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_ceph_disk_member_not_available_blkid(self, monkeypatch, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member
+ assert not disk.available
+ assert "Used by ceph-disk" in disk.rejected_reasons
+
+ def test_reject_removable_device(self, device_info):
+ data = {"/dev/sdb": {"removable": 1}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sdb")
+ assert not disk.available
+
+ def test_accept_non_removable_device(self, device_info):
+ data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sdb")
+ assert disk.available
+
+ def test_reject_not_acceptable_device(self, device_info):
+ data = {"/dev/dm-0": {"foo": "bar"}}
+ lsblk = {"TYPE": "mpath"}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/dm-0")
+ assert not disk.available
+
+ def test_reject_readonly_device(self, device_info):
+ data = {"/dev/cdrom": {"ro": 1}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/cdrom")
+ assert not disk.available
+
+ def test_reject_smaller_than_5gb(self, device_info):
+ data = {"/dev/sda": {"size": 5368709119}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.available, 'too small device is available'
+
+ def test_accept_non_readonly_device(self, device_info):
+ data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.available
+
+ def test_reject_bluestore_device(self, monkeypatch, patch_bluestore_label, device_info):
+ patch_bluestore_label.return_value = True
+ lsblk = {"TYPE": "disk"}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert not disk.available
+ assert "Has BlueStore device label" in disk.rejected_reasons
+
+ @pytest.mark.usefixtures("device_info_not_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_not_ceph_disk_member_lsblk(self, patch_bluestore_label):
+ disk = device.Device("/dev/sda")
+ assert disk.is_ceph_disk_member is False
+
+ def test_existing_vg_available(self, monkeypatch, device_info):
+ vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1536,
+ vg_extent_size=4194304)
+ monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+ lsblk = {"TYPE": "disk"}
+ data = {"/dev/nvme0n1": {"size": "6442450944"}}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/nvme0n1")
+ assert disk.available_lvm
+ assert not disk.available
+ assert not disk.available_raw
+
+ def test_existing_vg_too_small(self, monkeypatch, device_info):
+ vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=4,
+ vg_extent_size=1073741824)
+ monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+ lsblk = {"TYPE": "disk"}
+ data = {"/dev/nvme0n1": {"size": "6442450944"}}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/nvme0n1")
+ assert not disk.available_lvm
+ assert not disk.available
+ assert not disk.available_raw
+
+ def test_multiple_existing_vgs(self, monkeypatch, device_info):
+ vg1 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=1000,
+ vg_extent_size=4194304)
+ vg2 = api.VolumeGroup(vg_name='foo/bar', vg_free_count=536,
+ vg_extent_size=4194304)
+ monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg1, vg2])
+ lsblk = {"TYPE": "disk"}
+ data = {"/dev/nvme0n1": {"size": "6442450944"}}
+ device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/nvme0n1")
+ assert disk.available_lvm
+ assert not disk.available
+ assert not disk.available_raw
+
+ @pytest.mark.parametrize("ceph_type", ["data", "block"])
+ def test_used_by_ceph(self, device_info,
+ monkeypatch, ceph_type):
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "part"}
+ FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000",
+ lv_uuid="0000", pv_tags={}, vg_name="vg")
+ pvolumes = []
+ pvolumes.append(FooPVolume)
+ lv_data = {"lv_name": "lv", "lv_path": "vg/lv", "vg_name": "vg",
+ "lv_uuid": "0000", "lv_tags":
+ "ceph.osd_id=0,ceph.type="+ceph_type}
+ volumes = []
+ lv = api.Volume(**lv_data)
+ volumes.append(lv)
+ monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
+ monkeypatch.setattr(api, 'get_lvs', lambda **kwargs:
+ deepcopy(volumes))
+
+ device_info(devices=data, lsblk=lsblk, lv=lv_data)
+ vg = api.VolumeGroup(vg_name='foo/bar', vg_free_count=6,
+ vg_extent_size=1073741824)
+ monkeypatch.setattr(api, 'get_device_vgs', lambda x: [vg])
+ disk = device.Device("/dev/sda")
+ assert disk.used_by_ceph
+
+ def test_not_used_by_ceph(self, device_info, monkeypatch):
+ FooPVolume = api.PVolume(pv_name='/dev/sda', pv_uuid="0000", lv_uuid="0000", pv_tags={}, vg_name="vg")
+ pvolumes = []
+ pvolumes.append(FooPVolume)
+ data = {"/dev/sda": {"foo": "bar"}}
+ lsblk = {"TYPE": "part"}
+ lv_data = {"lv_path": "vg/lv", "vg_name": "vg", "lv_uuid": "0000", "tags": {"ceph.osd_id": 0, "ceph.type": "journal"}}
+ monkeypatch.setattr(api, 'get_pvs', lambda **kwargs: pvolumes)
+
+ device_info(devices=data, lsblk=lsblk, lv=lv_data)
+ disk = device.Device("/dev/sda")
+ assert not disk.used_by_ceph
+
+ def test_get_device_id(self, device_info):
+        udev = {k: k for k in ['ID_VENDOR', 'ID_MODEL', 'ID_SCSI_SERIAL']}
+ lsblk = {"TYPE": "disk"}
+        device_info(udevadm=udev, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk._get_device_id() == 'ID_VENDOR_ID_MODEL_ID_SCSI_SERIAL'
+
+
+class TestDeviceEncryption(object):
+
+ def test_partition_is_not_encrypted_lsblk(self, device_info):
+ lsblk = {'TYPE': 'part', 'FSTYPE': 'xfs'}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is False
+
+ def test_partition_is_encrypted_lsblk(self, device_info):
+ lsblk = {'TYPE': 'part', 'FSTYPE': 'crypto_LUKS'}
+ device_info(lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is True
+
+ def test_partition_is_not_encrypted_blkid(self, device_info):
+ lsblk = {'TYPE': 'part'}
+ blkid = {'TYPE': 'ceph data'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is False
+
+ def test_partition_is_encrypted_blkid(self, device_info):
+ lsblk = {'TYPE': 'part'}
+ blkid = {'TYPE': 'crypto_LUKS'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ assert disk.is_encrypted is True
+
+ def test_mapper_is_encrypted_luks1(self, device_info, monkeypatch):
+ status = {'type': 'LUKS1'}
+ monkeypatch.setattr(device, 'encryption_status', lambda x: status)
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is True
+
+ def test_mapper_is_encrypted_luks2(self, device_info, monkeypatch):
+ status = {'type': 'LUKS2'}
+ monkeypatch.setattr(device, 'encryption_status', lambda x: status)
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is True
+
+ def test_mapper_is_encrypted_plain(self, device_info, monkeypatch):
+ status = {'type': 'PLAIN'}
+ monkeypatch.setattr(device, 'encryption_status', lambda x: status)
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is True
+
+ def test_mapper_is_not_encrypted_plain(self, device_info, monkeypatch):
+ monkeypatch.setattr(device, 'encryption_status', lambda x: {})
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/mapper/uuid")
+ assert disk.is_encrypted is False
+
+ def test_lv_is_encrypted_blkid(self, device_info):
+ lsblk = {'TYPE': 'lvm'}
+ blkid = {'TYPE': 'crypto_LUKS'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = {}
+ assert disk.is_encrypted is True
+
+ def test_lv_is_not_encrypted_blkid(self, factory, device_info):
+ lsblk = {'TYPE': 'lvm'}
+ blkid = {'TYPE': 'xfs'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=None)
+ assert disk.is_encrypted is False
+
+ def test_lv_is_encrypted_lsblk(self, device_info):
+ lsblk = {'FSTYPE': 'crypto_LUKS', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = {}
+ assert disk.is_encrypted is True
+
+ def test_lv_is_not_encrypted_lsblk(self, factory, device_info):
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=None)
+ assert disk.is_encrypted is False
+
+ def test_lv_is_encrypted_lvm_api(self, factory, device_info):
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=True)
+ assert disk.is_encrypted is True
+
+ def test_lv_is_not_encrypted_lvm_api(self, factory, device_info):
+ lsblk = {'FSTYPE': 'xfs', 'TYPE': 'lvm'}
+ blkid = {'TYPE': 'mapper'}
+ device_info(lsblk=lsblk, blkid=blkid)
+ disk = device.Device("/dev/sda")
+ disk.lv_api = factory(encrypted=False)
+ assert disk.is_encrypted is False
+
+
+class TestDeviceOrdering(object):
+
+ def setup(self):
+ self.data = {
+ "/dev/sda": {"removable": 0},
+ "/dev/sdb": {"removable": 1}, # invalid
+ "/dev/sdc": {"removable": 0},
+ "/dev/sdd": {"removable": 1}, # invalid
+ }
+
+ def test_valid_before_invalid(self, device_info):
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=self.data, lsblk=lsblk)
+ sda = device.Device("/dev/sda")
+ sdb = device.Device("/dev/sdb")
+
+ assert sda < sdb
+ assert sdb > sda
+
+ def test_valid_alphabetical_ordering(self, device_info):
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=self.data, lsblk=lsblk)
+ sda = device.Device("/dev/sda")
+ sdc = device.Device("/dev/sdc")
+
+ assert sda < sdc
+ assert sdc > sda
+
+ def test_invalid_alphabetical_ordering(self, device_info):
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=self.data, lsblk=lsblk)
+ sdb = device.Device("/dev/sdb")
+ sdd = device.Device("/dev/sdd")
+
+ assert sdb < sdd
+ assert sdd > sdb
+
+
+class TestCephDiskDevice(object):
+
+ def test_partlabel_lsblk(self, device_info):
+ lsblk = {"TYPE": "disk", "PARTLABEL": ""}
+ device_info(lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.partlabel == ''
+
+ def test_partlabel_blkid(self, device_info):
+ blkid = {"TYPE": "disk", "PARTLABEL": "ceph data"}
+ device_info(blkid=blkid)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.partlabel == 'ceph data'
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_member_blkid(self, monkeypatch, patch_bluestore_label):
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.is_member is True
+
+ def test_reject_removable_device(self, device_info):
+ data = {"/dev/sdb": {"removable": 1}}
+ device_info(devices=data)
+ disk = device.Device("/dev/sdb")
+ assert not disk.available
+
+ def test_accept_non_removable_device(self, device_info):
+ data = {"/dev/sdb": {"removable": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sdb")
+ assert disk.available
+
+ def test_reject_readonly_device(self, device_info):
+ data = {"/dev/cdrom": {"ro": 1}}
+ device_info(devices=data)
+ disk = device.Device("/dev/cdrom")
+ assert not disk.available
+
+ def test_reject_smaller_than_5gb(self, device_info):
+ data = {"/dev/sda": {"size": 5368709119}}
+ device_info(devices=data)
+ disk = device.Device("/dev/sda")
+ assert not disk.available, 'too small device is available'
+
+ def test_accept_non_readonly_device(self, device_info):
+ data = {"/dev/sda": {"ro": 0, "size": 5368709120}}
+ lsblk = {"TYPE": "disk"}
+        device_info(devices=data, lsblk=lsblk)
+ disk = device.Device("/dev/sda")
+ assert disk.available
+
+ @pytest.mark.usefixtures("lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_is_member_lsblk(self, patch_bluestore_label, device_info):
+ lsblk = {"TYPE": "disk", "PARTLABEL": "ceph"}
+ device_info(lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.is_member is True
+
+ def test_unknown_type(self, device_info):
+ lsblk = {"TYPE": "disk", "PARTLABEL": "gluster"}
+ device_info(lsblk=lsblk)
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.type == 'unknown'
+
+ ceph_types = ['data', 'wal', 'db', 'lockbox', 'journal', 'block']
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_type_blkid(self, monkeypatch, device_info, ceph_partlabel):
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.type in self.ceph_types
+
+ @pytest.mark.usefixtures("blkid_ceph_disk_member",
+ "lsblk_ceph_disk_member",
+ "disable_kernel_queries")
+ def test_type_lsblk(self, device_info, ceph_partlabel):
+ disk = device.CephDiskDevice(device.Device("/dev/sda"))
+
+ assert disk.type in self.ceph_types
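A recurring pattern in the tests above is the three-way split between
Device.available, available_lvm, and available_raw: a disk that already
carries a volume group with enough free extents is rejected for raw use but
still usable for LVM-based OSDs. A hedged sketch of how a caller might act on
that split; filter_for_lvm is an invented helper name:

    from ceph_volume.util.device import Device

    def filter_for_lvm(paths):
        # keep devices that can still host an LVM-based OSD, even when
        # they are not 'available' in the stricter raw sense
        return [p for p in paths if Device(p).available_lvm]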
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/src/ceph-volume/ceph_volume/tests/util/test_disk.py
new file mode 100644
index 00000000..5f4d5734
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_disk.py
@@ -0,0 +1,540 @@
+import os
+import pytest
+from mock.mock import patch
+from ceph_volume.util import disk
+
+
+class TestLsblkParser(object):
+
+ def test_parses_whitespace_values(self):
+ output = 'NAME="sdaa5" PARTLABEL="ceph data" RM="0" SIZE="10M" RO="0" TYPE="part"'
+ result = disk._lsblk_parser(output)
+ assert result['PARTLABEL'] == 'ceph data'
+
+ def test_ignores_bogus_pairs(self):
+ output = 'NAME="sdaa5" PARTLABEL RM="0" SIZE="10M" RO="0" TYPE="part" MOUNTPOINT=""'
+ result = disk._lsblk_parser(output)
+ assert result['SIZE'] == '10M'
+
+
+class TestBlkidParser(object):
+
+ def test_parses_whitespace_values(self):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ result = disk._blkid_parser(output)
+ assert result['PARTLABEL'] == 'ceph data'
+
+ def test_ignores_unmapped(self):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ result = disk._blkid_parser(output)
+ assert len(result.keys()) == 4
+
+ def test_translates_to_partuuid(self):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ result = disk._blkid_parser(output)
+ assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f'
+
+
+class TestBlkid(object):
+
+ def test_parses_translated(self, stub_call):
+ output = '''/dev/sdb1: UUID="62416664-cbaf-40bd-9689-10bd337379c3" TYPE="xfs" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="ceph data" PART_ENTRY_UUID="b89c03bc-bf58-4338-a8f8-a2f484852b4f"''' # noqa
+ stub_call((output.split(), [], 0))
+ result = disk.blkid('/dev/sdb1')
+ assert result['PARTUUID'] == 'b89c03bc-bf58-4338-a8f8-a2f484852b4f'
+ assert result['PARTLABEL'] == 'ceph data'
+ assert result['UUID'] == '62416664-cbaf-40bd-9689-10bd337379c3'
+        assert result['TYPE'] == 'xfs'
+
+
+class TestUdevadmProperty(object):
+
+ def test_good_output(self, stub_call):
+ output = """ID_MODEL=SK_hynix_SC311_SATA_512GB
+ID_PART_TABLE_TYPE=gpt
+ID_SERIAL_SHORT=MS83N71801150416A""".split()
+ stub_call((output, [], 0))
+ result = disk.udevadm_property('dev/sda')
+ assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB'
+ assert result['ID_PART_TABLE_TYPE'] == 'gpt'
+ assert result['ID_SERIAL_SHORT'] == 'MS83N71801150416A'
+
+ def test_property_filter(self, stub_call):
+ output = """ID_MODEL=SK_hynix_SC311_SATA_512GB
+ID_PART_TABLE_TYPE=gpt
+ID_SERIAL_SHORT=MS83N71801150416A""".split()
+ stub_call((output, [], 0))
+ result = disk.udevadm_property('dev/sda', ['ID_MODEL',
+ 'ID_SERIAL_SHORT'])
+ assert result['ID_MODEL'] == 'SK_hynix_SC311_SATA_512GB'
+ assert 'ID_PART_TABLE_TYPE' not in result
+
+ def test_fail_on_broken_output(self, stub_call):
+ output = ["ID_MODEL:SK_hynix_SC311_SATA_512GB"]
+ stub_call((output, [], 0))
+ with pytest.raises(ValueError):
+ disk.udevadm_property('dev/sda')
+
+
+class TestDeviceFamily(object):
+
+ def test_groups_multiple_devices(self, stub_call):
+ out = [
+            'NAME="sdaa5" PARTLABEL="ceph lockbox"',
+            'NAME="sdaa" RO="0"',
+            'NAME="sdaa1" PARTLABEL="ceph data"',
+            'NAME="sdaa2" PARTLABEL="ceph journal"',
+ ]
+ stub_call((out, '', 0))
+ result = disk.device_family('sdaa5')
+ assert len(result) == 4
+
+ def test_parses_output_correctly(self, stub_call):
+ names = ['sdaa', 'sdaa5', 'sdaa1', 'sdaa2']
+ out = [
+            'NAME="sdaa5" PARTLABEL="ceph lockbox"',
+            'NAME="sdaa" RO="0"',
+            'NAME="sdaa1" PARTLABEL="ceph data"',
+            'NAME="sdaa2" PARTLABEL="ceph journal"',
+ ]
+ stub_call((out, '', 0))
+ result = disk.device_family('sdaa5')
+ for parsed in result:
+ assert parsed['NAME'] in names
+
+
+class TestHumanReadableSize(object):
+
+ def test_bytes(self):
+ result = disk.human_readable_size(800)
+ assert result == '800.00 B'
+
+ def test_kilobytes(self):
+ result = disk.human_readable_size(800*1024)
+ assert result == '800.00 KB'
+
+ def test_megabytes(self):
+ result = disk.human_readable_size(800*1024*1024)
+ assert result == '800.00 MB'
+
+ def test_gigabytes(self):
+ result = disk.human_readable_size(8.19*1024*1024*1024)
+ assert result == '8.19 GB'
+
+ def test_terabytes(self):
+ result = disk.human_readable_size(81.2*1024*1024*1024*1024)
+ assert result == '81.20 TB'
+
+
+class TestSizeFromHumanReadable(object):
+
+ def test_bytes(self):
+ result = disk.size_from_human_readable('2')
+ assert result == disk.Size(b=2)
+
+ def test_kilobytes(self):
+ result = disk.size_from_human_readable('2 K')
+ assert result == disk.Size(kb=2)
+
+ def test_megabytes(self):
+ result = disk.size_from_human_readable('2 M')
+ assert result == disk.Size(mb=2)
+
+ def test_gigabytes(self):
+ result = disk.size_from_human_readable('2 G')
+ assert result == disk.Size(gb=2)
+
+    def test_terabytes(self):
+ result = disk.size_from_human_readable('2 T')
+ assert result == disk.Size(tb=2)
+
+ def test_case(self):
+ result = disk.size_from_human_readable('2 t')
+ assert result == disk.Size(tb=2)
+
+ def test_space(self):
+ result = disk.size_from_human_readable('2T')
+ assert result == disk.Size(tb=2)
+
+ def test_float(self):
+ result = disk.size_from_human_readable('2.0')
+ assert result == disk.Size(b=2)
+ result = disk.size_from_human_readable('2.0T')
+ assert result == disk.Size(tb=2)
+ result = disk.size_from_human_readable('1.8T')
+ assert result == disk.Size(tb=1.8)
+
+
+class TestSizeParse(object):
+
+ def test_bytes(self):
+ result = disk.Size.parse('2')
+ assert result == disk.Size(b=2)
+
+ def test_kilobytes(self):
+ result = disk.Size.parse('2K')
+ assert result == disk.Size(kb=2)
+
+ def test_megabytes(self):
+ result = disk.Size.parse('2M')
+ assert result == disk.Size(mb=2)
+
+ def test_gigabytes(self):
+ result = disk.Size.parse('2G')
+ assert result == disk.Size(gb=2)
+
+    def test_terabytes(self):
+ result = disk.Size.parse('2T')
+ assert result == disk.Size(tb=2)
+
+ def test_tb(self):
+ result = disk.Size.parse('2Tb')
+ assert result == disk.Size(tb=2)
+
+ def test_case(self):
+ result = disk.Size.parse('2t')
+ assert result == disk.Size(tb=2)
+
+ def test_space(self):
+ result = disk.Size.parse('2T')
+ assert result == disk.Size(tb=2)
+
+ def test_float(self):
+ result = disk.Size.parse('2.0')
+ assert result == disk.Size(b=2)
+ result = disk.Size.parse('2.0T')
+ assert result == disk.Size(tb=2)
+ result = disk.Size.parse('1.8T')
+ assert result == disk.Size(tb=1.8)
+
+
+class TestGetBlockDevsLsblk(object):
+
+ @patch('ceph_volume.process.call')
+ def test_return_structure(self, patched_call):
+ lsblk_stdout = [
+ '/dev/dm-0 /dev/mapper/ceph--8b2684eb--56ff--49e4--8f28--522e04cbd6ab-osd--data--9fc29fbf--3b5b--4066--be10--61042569b5a7 lvm',
+ '/dev/vda /dev/vda disk',
+ '/dev/vda1 /dev/vda1 part',
+            '/dev/vdb /dev/vdb disk',
+        ]
+ patched_call.return_value = (lsblk_stdout, '', 0)
+ disks = disk.get_block_devs_lsblk()
+ assert len(disks) == len(lsblk_stdout)
+ assert len(disks[0]) == 3
+
+ @patch('ceph_volume.process.call')
+ def test_empty_lsblk(self, patched_call):
+ patched_call.return_value = ([], '', 0)
+ disks = disk.get_block_devs_lsblk()
+ assert len(disks) == 0
+
+ @patch('ceph_volume.process.call')
+ def test_raise_on_failure(self, patched_call):
+ patched_call.return_value = ([], 'error', 1)
+ with pytest.raises(OSError):
+ disk.get_block_devs_lsblk()
+
+
+class TestGetDevices(object):
+
+ def setup_path(self, tmpdir):
+ path = os.path.join(str(tmpdir), 'block')
+ os.makedirs(path)
+ return path
+
+ def test_no_devices_are_found(self, tmpdir, patched_get_block_devs_lsblk):
+ patched_get_block_devs_lsblk.return_value = []
+ result = disk.get_devices(_sys_block_path=str(tmpdir))
+ assert result == {}
+
+ def test_sda_block_is_found(self, tmpdir, patched_get_block_devs_lsblk):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']]
+ block_path = self.setup_path(tmpdir)
+ os.makedirs(os.path.join(block_path, 'sda'))
+ result = disk.get_devices(_sys_block_path=block_path)
+ assert len(result.keys()) == 1
+ assert result[sda_path]['human_readable_size'] == '0.00 B'
+ assert result[sda_path]['model'] == ''
+ assert result[sda_path]['partitions'] == {}
+
+ def test_sda_size(self, tmpfile, tmpdir, patched_get_block_devs_lsblk):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']]
+ block_path = self.setup_path(tmpdir)
+ block_sda_path = os.path.join(block_path, 'sda')
+ os.makedirs(block_sda_path)
+ tmpfile('size', '1024', directory=block_sda_path)
+ result = disk.get_devices(_sys_block_path=block_path)
+ assert list(result.keys()) == [sda_path]
+ assert result[sda_path]['human_readable_size'] == '512.00 KB'
+
+ def test_sda_sectorsize_fallsback(self, tmpfile, tmpdir, patched_get_block_devs_lsblk):
+        # without queue/logical_block_size, queue/hw_sector_size is used instead
+ sda_path = '/dev/sda'
+ patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']]
+ block_path = self.setup_path(tmpdir)
+ block_sda_path = os.path.join(block_path, 'sda')
+ sda_queue_path = os.path.join(block_sda_path, 'queue')
+ os.makedirs(block_sda_path)
+ os.makedirs(sda_queue_path)
+ tmpfile('hw_sector_size', contents='1024', directory=sda_queue_path)
+ result = disk.get_devices(_sys_block_path=block_path)
+ assert list(result.keys()) == [sda_path]
+ assert result[sda_path]['sectorsize'] == '1024'
+
+ def test_sda_sectorsize_from_logical_block(self, tmpfile, tmpdir, patched_get_block_devs_lsblk):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']]
+ block_path = self.setup_path(tmpdir)
+ block_sda_path = os.path.join(block_path, 'sda')
+ sda_queue_path = os.path.join(block_sda_path, 'queue')
+ os.makedirs(block_sda_path)
+ os.makedirs(sda_queue_path)
+ tmpfile('logical_block_size', contents='99', directory=sda_queue_path)
+ result = disk.get_devices(_sys_block_path=block_path)
+ assert result[sda_path]['sectorsize'] == '99'
+
+ def test_sda_sectorsize_does_not_fallback(self, tmpfile, tmpdir, patched_get_block_devs_lsblk):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']]
+ block_path = self.setup_path(tmpdir)
+ block_sda_path = os.path.join(block_path, 'sda')
+ sda_queue_path = os.path.join(block_sda_path, 'queue')
+ os.makedirs(block_sda_path)
+ os.makedirs(sda_queue_path)
+ tmpfile('logical_block_size', contents='99', directory=sda_queue_path)
+ tmpfile('hw_sector_size', contents='1024', directory=sda_queue_path)
+ result = disk.get_devices(_sys_block_path=block_path)
+ assert result[sda_path]['sectorsize'] == '99'
+
+ def test_is_rotational(self, tmpfile, tmpdir, patched_get_block_devs_lsblk):
+ sda_path = '/dev/sda'
+ patched_get_block_devs_lsblk.return_value = [[sda_path, sda_path, 'disk']]
+ block_path = self.setup_path(tmpdir)
+ block_sda_path = os.path.join(block_path, 'sda')
+ sda_queue_path = os.path.join(block_sda_path, 'queue')
+ os.makedirs(block_sda_path)
+ os.makedirs(sda_queue_path)
+ tmpfile('rotational', contents='1', directory=sda_queue_path)
+ result = disk.get_devices(_sys_block_path=block_path)
+ assert result[sda_path]['rotational'] == '1'
+
+
+class TestSizeCalculations(object):
+
+ @pytest.mark.parametrize('aliases', [
+ ('b', 'bytes'),
+ ('kb', 'kilobytes'),
+ ('mb', 'megabytes'),
+ ('gb', 'gigabytes'),
+ ('tb', 'terabytes'),
+ ])
+ def test_aliases(self, aliases):
+ short_alias, long_alias = aliases
+ s = disk.Size(b=1)
+ short_alias = getattr(s, short_alias)
+ long_alias = getattr(s, long_alias)
+ assert short_alias == long_alias
+
+ @pytest.mark.parametrize('values', [
+ ('b', 857619069665.28),
+ ('kb', 837518622.72),
+ ('mb', 817889.28),
+ ('gb', 798.72),
+ ('tb', 0.78),
+ ])
+ def test_terabytes(self, values):
+        # regardless of which unit is given as input, all the other unit
+        # values must correlate to the same totals, every time
+ unit, value = values
+ s = disk.Size(**{unit: value})
+ assert s.b == 857619069665.28
+ assert s.kb == 837518622.72
+ assert s.mb == 817889.28
+ assert s.gb == 798.72
+ assert s.tb == 0.78
+
+
+class TestSizeOperators(object):
+
+ @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001])
+ def test_gigabytes_is_smaller(self, larger):
+ assert disk.Size(gb=1) < disk.Size(mb=larger)
+
+ @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001])
+ def test_gigabytes_is_larger(self, smaller):
+ assert disk.Size(gb=1) > disk.Size(mb=smaller)
+
+ @pytest.mark.parametrize('larger', [1025, 1024.1, 1024.001, 1024])
+ def test_gigabytes_is_smaller_or_equal(self, larger):
+ assert disk.Size(gb=1) <= disk.Size(mb=larger)
+
+ @pytest.mark.parametrize('smaller', [1023, 1023.9, 1023.001, 1024])
+ def test_gigabytes_is_larger_or_equal(self, smaller):
+ assert disk.Size(gb=1) >= disk.Size(mb=smaller)
+
+ @pytest.mark.parametrize('values', [
+ ('b', 857619069665.28),
+ ('kb', 837518622.72),
+ ('mb', 817889.28),
+ ('gb', 798.72),
+ ('tb', 0.78),
+ ])
+ def test_equality(self, values):
+ unit, value = values
+ s = disk.Size(**{unit: value})
+        # check both tb and b, since b is always calculated regardless and
+        # is a useful cross-check when testing tb
+ assert disk.Size(tb=0.78) == s
+ assert disk.Size(b=857619069665.28) == s
+
+ @pytest.mark.parametrize('values', [
+ ('b', 857619069665.28),
+ ('kb', 837518622.72),
+ ('mb', 817889.28),
+ ('gb', 798.72),
+ ('tb', 0.78),
+ ])
+ def test_inequality(self, values):
+ unit, value = values
+ s = disk.Size(**{unit: value})
+        # check both tb and b, since b is always calculated regardless and
+        # is a useful cross-check when testing tb
+ assert disk.Size(tb=1) != s
+ assert disk.Size(b=100) != s
+
+
+class TestSizeOperations(object):
+
+ def test_assignment_addition_with_size_objects(self):
+ result = disk.Size(mb=256) + disk.Size(gb=1)
+ assert result.gb == 1.25
+ assert result.gb.as_int() == 1
+ assert result.gb.as_float() == 1.25
+
+ def test_self_addition_with_size_objects(self):
+ base = disk.Size(mb=256)
+ base += disk.Size(gb=1)
+ assert base.gb == 1.25
+
+ def test_self_addition_does_not_alter_state(self):
+ base = disk.Size(mb=256)
+ base + disk.Size(gb=1)
+ assert base.mb == 256
+
+ def test_addition_with_non_size_objects(self):
+ with pytest.raises(TypeError):
+ disk.Size(mb=100) + 4
+
+ def test_assignment_subtraction_with_size_objects(self):
+ base = disk.Size(gb=1)
+ base -= disk.Size(mb=256)
+ assert base.mb == 768
+
+ def test_self_subtraction_does_not_alter_state(self):
+ base = disk.Size(gb=1)
+ base - disk.Size(mb=256)
+ assert base.gb == 1
+
+ def test_subtraction_with_size_objects(self):
+ result = disk.Size(gb=1) - disk.Size(mb=256)
+ assert result.mb == 768
+
+ def test_subtraction_with_non_size_objects(self):
+ with pytest.raises(TypeError):
+ disk.Size(mb=100) - 4
+
+ def test_multiplication_with_size_objects(self):
+ with pytest.raises(TypeError):
+ disk.Size(mb=100) * disk.Size(mb=1)
+
+ def test_multiplication_with_non_size_objects(self):
+ base = disk.Size(gb=1)
+ result = base * 2
+ assert result.gb == 2
+ assert result.gb.as_int() == 2
+
+ def test_division_with_size_objects(self):
+ result = disk.Size(gb=1) / disk.Size(mb=1)
+ assert int(result) == 1024
+
+ def test_division_with_non_size_objects(self):
+ base = disk.Size(gb=1)
+ result = base / 2
+ assert result.mb == 512
+ assert result.mb.as_int() == 512
+
+ def test_division_with_non_size_objects_without_state(self):
+ base = disk.Size(gb=1)
+ base / 2
+ assert base.gb == 1
+ assert base.gb.as_int() == 1
+
+
+class TestSizeAttributes(object):
+
+ def test_attribute_does_not_exist(self):
+ with pytest.raises(AttributeError):
+ disk.Size(mb=1).exabytes
+
+
+class TestSizeFormatting(object):
+
+ def test_default_formatting_tb_to_b(self):
+ size = disk.Size(tb=0.0000000001)
+ result = "%s" % size
+ assert result == "109.95 B"
+
+ def test_default_formatting_tb_to_kb(self):
+ size = disk.Size(tb=0.00000001)
+ result = "%s" % size
+ assert result == "10.74 KB"
+
+ def test_default_formatting_tb_to_mb(self):
+ size = disk.Size(tb=0.000001)
+ result = "%s" % size
+ assert result == "1.05 MB"
+
+ def test_default_formatting_tb_to_gb(self):
+ size = disk.Size(tb=0.001)
+ result = "%s" % size
+ assert result == "1.02 GB"
+
+ def test_default_formatting_tb_to_tb(self):
+ size = disk.Size(tb=10)
+ result = "%s" % size
+ assert result == "10.00 TB"
+
+
+class TestSizeSpecificFormatting(object):
+
+ def test_formatting_b(self):
+ size = disk.Size(b=2048)
+ result = "%s" % size.b
+ assert "%s" % size.b == "%s" % size.bytes
+ assert result == "2048.00 B"
+
+ def test_formatting_kb(self):
+ size = disk.Size(kb=5700)
+ result = "%s" % size.kb
+ assert "%s" % size.kb == "%s" % size.kilobytes
+ assert result == "5700.00 KB"
+
+ def test_formatting_mb(self):
+ size = disk.Size(mb=4000)
+ result = "%s" % size.mb
+ assert "%s" % size.mb == "%s" % size.megabytes
+ assert result == "4000.00 MB"
+
+ def test_formatting_gb(self):
+ size = disk.Size(gb=77777)
+ result = "%s" % size.gb
+ assert "%s" % size.gb == "%s" % size.gigabytes
+ assert result == "77777.00 GB"
+
+ def test_formatting_tb(self):
+ size = disk.Size(tb=1027)
+ result = "%s" % size.tb
+ assert "%s" % size.tb == "%s" % size.terabytes
+ assert result == "1027.00 TB"
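Taken together, these tests define a small algebra for disk.Size:
construction from any unit keyword, parsing of human-readable strings,
addition and subtraction between Size objects, scaling by plain numbers, and
unit attributes that format themselves. A short usage example consistent with
the assertions above:

    from ceph_volume.util import disk

    total = disk.Size(gb=1) + disk.Size(mb=256)
    half = total / 2                     # dividing by a number scales
    assert total.gb.as_float() == 1.25
    assert half.mb.as_int() == 640
    assert disk.Size.parse('1.8T') == disk.Size(tb=1.8)
    print("%s" % total)                  # -> "1.25 GB"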
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_encryption.py b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
new file mode 100644
index 00000000..e1420b44
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
@@ -0,0 +1,53 @@
+from ceph_volume.util import encryption
+
+
+class TestStatus(object):
+
+ def test_skips_unuseful_lines(self, stub_call):
+ out = ['some line here', ' device: /dev/sdc1']
+ stub_call((out, '', 0))
+ assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'}
+
+ def test_removes_extra_quotes(self, stub_call):
+ out = ['some line here', ' device: "/dev/sdc1"']
+ stub_call((out, '', 0))
+ assert encryption.status('/dev/sdc1') == {'device': '/dev/sdc1'}
+
+ def test_ignores_bogus_lines(self, stub_call):
+ out = ['some line here', ' ']
+ stub_call((out, '', 0))
+ assert encryption.status('/dev/sdc1') == {}
+
+
+class TestDmcryptClose(object):
+
+ def test_mapper_exists(self, fake_run, tmpfile):
+ file_name = tmpfile(name='mapper-device')
+ encryption.dmcrypt_close(file_name)
+ arguments = fake_run.calls[0]['args'][0]
+ assert arguments[0] == 'cryptsetup'
+ assert arguments[1] == 'remove'
+ assert arguments[2].startswith('/')
+
+ def test_mapper_does_not_exist(self, fake_run):
+ file_name = '/path/does/not/exist'
+ encryption.dmcrypt_close(file_name)
+ assert fake_run.calls == []
+
+
+class TestDmcryptKey(object):
+
+ def test_dmcrypt_with_default_size(self, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ result = encryption.create_dmcrypt_key()
+ assert len(result) == 172
+
+ def test_dmcrypt_with_custom_size(self, conf_ceph_stub):
+ conf_ceph_stub('''
+ [global]
+ fsid=asdf
+ [osd]
+ osd_dmcrypt_size=8
+ ''')
+ result = encryption.create_dmcrypt_key()
+ assert len(result) == 172
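encryption.status evidently parses 'cryptsetup status'-style output into a
dict, keeping only key: value lines and stripping surrounding quotes. A rough
reimplementation of what these three tests imply, not the actual function:

    def parse_status(lines):
        parsed = {}
        for line in lines:
            if ':' not in line:
                continue  # skips 'some line here' and blank lines
            key, _, value = line.partition(':')
            key, value = key.strip(), value.strip().strip('"')
            if key and value:
                parsed[key] = value
        return parsed

    assert parse_status(['some line here', '  device: "/dev/sdc1"']) == \
        {'device': '/dev/sdc1'}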
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_prepare.py b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
new file mode 100644
index 00000000..ced5d49e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
@@ -0,0 +1,422 @@
+import pytest
+from textwrap import dedent
+import json
+from ceph_volume.util import prepare
+from ceph_volume.util.prepare import system
+from ceph_volume import conf
+from ceph_volume.tests.conftest import Factory
+
+
+class TestOSDIDAvailable(object):
+
+ def test_false_if_id_is_none(self):
+ assert not prepare.osd_id_available(None)
+
+ def test_returncode_is_not_zero(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: ('', '', 1))
+ with pytest.raises(RuntimeError):
+ prepare.osd_id_available(1)
+
+ def test_id_does_exist_but_not_available(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0, status="up"),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available(0)
+ assert not result
+
+ def test_id_does_not_exist(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available(1)
+ assert not result
+
+ def test_invalid_osd_id(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available("foo")
+ assert not result
+
+ def test_returns_true_when_id_is_destroyed(self, monkeypatch):
+ stdout = dict(nodes=[
+ dict(id=0, status="destroyed"),
+ ])
+ stdout = ['', json.dumps(stdout)]
+ monkeypatch.setattr('ceph_volume.process.call', lambda *a, **kw: (stdout, '', 0))
+ result = prepare.osd_id_available(0)
+ assert result
+
+
+class TestFormatDevice(object):
+
+ def test_include_force(self, fake_run, monkeypatch):
+ monkeypatch.setattr(conf, 'ceph', Factory(get_list=lambda *a, **kw: []))
+ prepare.format_device('/dev/sxx')
+ flags = fake_run.calls[0]['args'][0]
+ assert '-f' in flags
+
+ def test_device_is_always_appended(self, fake_run, conf_ceph):
+ conf_ceph(get_list=lambda *a, **kw: [])
+ prepare.format_device('/dev/sxx')
+ flags = fake_run.calls[0]['args'][0]
+ assert flags[-1] == '/dev/sxx'
+
+ def test_extra_flags_are_added(self, fake_run, conf_ceph):
+ conf_ceph(get_list=lambda *a, **kw: ['--why-yes'])
+ prepare.format_device('/dev/sxx')
+ flags = fake_run.calls[0]['args'][0]
+ assert '--why-yes' in flags
+
+ def test_default_options(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=2048', # default flags
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mkfs options xfs = -f -i size=1024"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=1024',
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_options_will_get_the_force_flag(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mkfs options xfs = -i size=1024"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=1024',
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_underscore_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd_mkfs_options_xfs = -i size=128"""))
+ conf.cluster = 'ceph'
+ prepare.format_device('/dev/sda1')
+ expected = [
+ 'mkfs', '-t', 'xfs',
+ '-f', '-i', 'size=128',
+ '/dev/sda1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+
+mkfs_filestore_flags = [
+ 'ceph-osd',
+ '--cluster',
+ '--osd-objectstore', 'filestore',
+ '--mkfs',
+ '-i',
+ '--monmap',
+ '--keyfile', '-', # goes through stdin
+ '--osd-data',
+ '--osd-journal',
+ '--osd-uuid',
+ '--setuser', 'ceph',
+ '--setgroup', 'ceph'
+]
+
+
+class TestOsdMkfsFilestore(object):
+
+ @pytest.mark.parametrize('flag', mkfs_filestore_flags)
+ def test_keyring_is_used(self, fake_call, monkeypatch, flag):
+ monkeypatch.setattr(prepare, '__release__', 'mimic')
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret')
+ assert flag in fake_call.calls[0]['args'][0]
+
+ def test_keyring_is_used_luminous(self, fake_call, monkeypatch):
+ monkeypatch.setattr(prepare, '__release__', 'luminous')
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_filestore(1, 'asdf', keyring='secret')
+ assert '--keyfile' not in fake_call.calls[0]['args'][0]
+
+
+class TestOsdMkfsBluestore(object):
+
+ def test_keyring_is_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf', keyring='secret')
+ assert '--keyfile' in fake_call.calls[0]['args'][0]
+
+ def test_keyring_is_not_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf')
+ assert '--keyfile' not in fake_call.calls[0]['args'][0]
+
+    def test_keyring_is_not_added_luminous(self, fake_call, monkeypatch):
+        # the release must be patched before the call for it to take effect
+        monkeypatch.setattr(prepare, '__release__', 'luminous')
+        monkeypatch.setattr(system, 'chown', lambda path: True)
+        prepare.osd_mkfs_bluestore(1, 'asdf')
+        assert '--keyfile' not in fake_call.calls[0]['args'][0]
+
+ def test_wal_is_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf', wal='/dev/smm1')
+ assert '--bluestore-block-wal-path' in fake_call.calls[0]['args'][0]
+ assert '/dev/smm1' in fake_call.calls[0]['args'][0]
+
+ def test_db_is_added(self, fake_call, monkeypatch):
+ monkeypatch.setattr(system, 'chown', lambda path: True)
+ prepare.osd_mkfs_bluestore(1, 'asdf', db='/dev/smm2')
+ assert '--bluestore-block-db-path' in fake_call.calls[0]['args'][0]
+ assert '/dev/smm2' in fake_call.calls[0]['args'][0]
+
+
+class TestMountOSD(object):
+
+ def test_default_options(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw,noatime,inode64', # default flags
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_mount_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_whitespace_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw auto exec"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw,auto,exec',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_multiple_comma_whitespace_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw, auto, exec"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw,auto,exec',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+ def test_underscore_mount_options_are_used(self, conf_ceph_stub, fake_run):
+ conf_ceph_stub(dedent("""[global]
+ fsid = 1234lkjh1234
+ [osd]
+ osd mount options xfs = rw"""))
+ conf.cluster = 'ceph'
+ prepare.mount_osd('/dev/sda1', 1)
+ expected = [
+ 'mount', '-t', 'xfs', '-o',
+ 'rw',
+ '/dev/sda1', '/var/lib/ceph/osd/ceph-1']
+ assert expected == fake_run.calls[0]['args'][0]
+
+
+ceph_conf_mount_values = [
+    ['rw,', 'auto,', 'exec'],
+ ['rw', 'auto', 'exec'],
+ [' rw ', ' auto ', ' exec '],
+ ['rw,', 'auto,', 'exec,'],
+ [',rw ', ',auto ', ',exec,'],
+ [',rw,', ',auto,', ',exec,'],
+]
+
+string_mount_values = [
+ 'rw, auto exec ',
+ 'rw auto exec',
+ ',rw, auto, exec,',
+ ' rw auto exec ',
+ ' rw,auto,exec ',
+ 'rw,auto,exec',
+ ',rw,auto,exec,',
+ 'rw,auto,exec ',
+ 'rw, auto, exec ',
+]
+
+
+class TestNormalizeFlags(object):
+ # a bit overkill since most of this is already tested in prepare.mount_osd
+ # tests
+
+ @pytest.mark.parametrize("flags", ceph_conf_mount_values)
+ def test_normalize_lists(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags).split(','))
+ assert ','.join(result) == 'auto,exec,rw'
+
+ @pytest.mark.parametrize("flags", string_mount_values)
+ def test_normalize_strings(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags).split(','))
+ assert ','.join(result) == 'auto,exec,rw'
+
+ @pytest.mark.parametrize("flags", ceph_conf_mount_values)
+ def test_normalize_extra_flags(self, flags):
+ result = prepare._normalize_mount_flags(flags, extras=['discard'])
+ assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw']
+
+ @pytest.mark.parametrize("flags", ceph_conf_mount_values)
+ def test_normalize_duplicate_extra_flags(self, flags):
+ result = prepare._normalize_mount_flags(flags, extras=['rw', 'discard'])
+ assert sorted(result.split(',')) == ['auto', 'discard', 'exec', 'rw']
+
+ @pytest.mark.parametrize("flags", string_mount_values)
+ def test_normalize_strings_flags(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags, extras=['discard']).split(','))
+ assert ','.join(result) == 'auto,discard,exec,rw'
+
+ @pytest.mark.parametrize("flags", string_mount_values)
+ def test_normalize_strings_duplicate_flags(self, flags):
+ result = sorted(prepare._normalize_mount_flags(flags, extras=['discard','rw']).split(','))
+ assert ','.join(result) == 'auto,discard,exec,rw'
+
+
+class TestMkfsFilestore(object):
+
+ def test_non_zero_exit_status(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
+ assert "Command failed with exit code 1" in str(error.value)
+
+ def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_filestore('1', 'asdf-1234', 'keyring')
+ expected = ' '.join([
+ 'ceph-osd',
+ '--cluster',
+ 'ceph',
+ '--osd-objectstore', 'filestore', '--mkfs',
+ '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
+ '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
+ '--osd-journal', '/var/lib/ceph/osd/ceph-1/journal',
+ '--osd-uuid', 'asdf-1234',
+ '--setuser', 'ceph', '--setgroup', 'ceph'])
+ assert expected in str(error.value)
+
+
+class TestMkfsBluestore(object):
+
+ def test_non_zero_exit_status(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
+ assert "Command failed with exit code 1" in str(error.value)
+
+ def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
+ conf.cluster = 'ceph'
+ monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
+ stub_call(([], [], 1))
+ with pytest.raises(RuntimeError) as error:
+ prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
+ expected = ' '.join([
+ 'ceph-osd',
+ '--cluster',
+ 'ceph',
+ '--osd-objectstore', 'bluestore', '--mkfs',
+ '-i', '1', '--monmap', '/var/lib/ceph/osd/ceph-1/activate.monmap',
+ '--keyfile', '-', '--osd-data', '/var/lib/ceph/osd/ceph-1/',
+ '--osd-uuid', 'asdf-1234',
+ '--setuser', 'ceph', '--setgroup', 'ceph'])
+ assert expected in str(error.value)
+
+
+class TestGetJournalSize(object):
+
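+    # with no 'osd journal size' set in ceph.conf, get_journal_size() falls
+    # back to a built-in 5G default; undersized values are refused outright
+    # (see the last test below)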
+    def test_undefined_size_falls_back_formatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+ """))
+ result = prepare.get_journal_size()
+ assert result == '5G'
+
+    def test_undefined_size_falls_back_unformatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+ """))
+ result = prepare.get_journal_size(lv_format=False)
+ assert result.gb.as_int() == 5
+
+ def test_defined_size_unformatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+
+ [osd]
+ osd journal size = 10240
+ """))
+ result = prepare.get_journal_size(lv_format=False)
+ assert result.gb.as_int() == 10
+
+ def test_defined_size_formatted(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+
+ [osd]
+ osd journal size = 10240
+ """))
+ result = prepare.get_journal_size()
+ assert result == '10G'
+
+ def test_refuse_tiny_journals(self, conf_ceph_stub):
+ conf_ceph_stub(dedent("""
+ [global]
+ fsid = a25d19a6-7d57-4eda-b006-78e35d2c4d9f
+
+ [osd]
+ osd journal size = 1024
+ """))
+ with pytest.raises(RuntimeError) as error:
+ prepare.get_journal_size()
+ assert 'journal sizes must be larger' in str(error.value)
+ assert 'detected: 1024.00 MB' in str(error.value)
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_system.py b/src/ceph-volume/ceph_volume/tests/util/test_system.py
new file mode 100644
index 00000000..74999949
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_system.py
@@ -0,0 +1,279 @@
+import os
+import pwd
+import getpass
+import pytest
+from textwrap import dedent
+from ceph_volume.util import system
+from mock.mock import patch
+
+
+class TestMkdirP(object):
+
+ def test_existing_dir_does_not_raise_w_chown(self, monkeypatch, tmpdir):
+ user = pwd.getpwnam(getpass.getuser())
+ uid, gid = user[2], user[3]
+ monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,))
+ path = str(tmpdir)
+ system.mkdir_p(path)
+ assert os.path.isdir(path)
+
+ def test_new_dir_w_chown(self, monkeypatch, tmpdir):
+ user = pwd.getpwnam(getpass.getuser())
+ uid, gid = user[2], user[3]
+ monkeypatch.setattr(system, 'get_ceph_user_ids', lambda: (uid, gid,))
+ path = os.path.join(str(tmpdir), 'new')
+ system.mkdir_p(path)
+ assert os.path.isdir(path)
+
+ def test_existing_dir_does_not_raise_no_chown(self, tmpdir):
+ path = str(tmpdir)
+ system.mkdir_p(path, chown=False)
+ assert os.path.isdir(path)
+
+ def test_new_dir_no_chown(self, tmpdir):
+ path = os.path.join(str(tmpdir), 'new')
+ system.mkdir_p(path, chown=False)
+ assert os.path.isdir(path)
+
+
+@pytest.fixture
+def fake_proc(tmpdir, monkeypatch):
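+    # write a fake PROCDIR/mounts file so the mount helpers parse
+    # deterministic data; the '/far/...' paths presumably stand in for
+    # '/var/...' to avoid colliding with real mounts on the test host, and
+    # os.path.exists is patched to True so every fake device counts as present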
+ PROCDIR = str(tmpdir)
+ proc_path = os.path.join(PROCDIR, 'mounts')
+ with open(proc_path, 'w') as f:
+ f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
+ rootfs / rootfs rw 0 0
+ sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0
+ proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0
+ devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=238292k,nr_inodes=59573,mode=755 0 0
+ securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0
+ tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0
+ devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0
+ tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0
+ tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0
+ cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0
+ cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0
+ configfs /sys/kernel/config configfs rw,relatime 0 0
+ /dev/mapper/VolGroup00-LogVol00 / xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
+ selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0
+ debugfs /sys/kernel/debug debugfs rw,relatime 0 0
+ hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0
+ mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0
+ sunrpc /far/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0
+ /dev/sde4 /two/field/path
+ nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
+ /dev/sde2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
+ tmpfs /far/lib/ceph/osd/ceph-5 tmpfs rw,seclabel,relatime 0 0
+ tmpfs /far/lib/ceph/osd/ceph-7 tmpfs rw,seclabel,relatime 0 0
+ /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,seclabel,noatime,attr2,inode64,noquota 0 0
+ tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=50040k,mode=700,uid=1000,gid=1000 0 0
+ /dev/sdc2 /boot xfs rw,seclabel,relatime,attr2,inode64,noquota 0 0
+ tmpfs /run/user/1000 tmpfs rw,seclabel,mode=700,uid=1000,gid=1000 0 0"""))
+ monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
+ monkeypatch.setattr(os.path, 'exists', lambda x: True)
+
+
+class TestPathIsMounted(object):
+
+ def test_is_mounted(self, fake_proc):
+ assert system.path_is_mounted('/boot') is True
+
+ def test_is_not_mounted(self, fake_proc):
+ assert system.path_is_mounted('/far/fib/feph') is False
+
+ def test_is_not_mounted_at_destination(self, fake_proc):
+ assert system.path_is_mounted('/boot', destination='/dev/sda1') is False
+
+ def test_is_mounted_at_destination(self, fake_proc):
+ assert system.path_is_mounted('/boot', destination='/dev/sdc2') is True
+
+
+class TestDeviceIsMounted(object):
+
+ def test_is_mounted(self, fake_proc):
+ assert system.device_is_mounted('/dev/sda1') is True
+
+ def test_path_is_not_device(self, fake_proc):
+ assert system.device_is_mounted('/far/lib/ceph/osd/ceph-7') is False
+
+ def test_is_not_mounted_at_destination(self, fake_proc):
+ assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/test-1') is False
+
+    def test_is_mounted_at_destination(self, fake_proc):
+        assert system.device_is_mounted('/dev/sda1', destination='/far/lib/ceph/osd/ceph-0') is True
+
+ def test_is_realpath_dev_mounted_at_destination(self, fake_proc, monkeypatch):
+ monkeypatch.setattr(system.os.path, 'realpath', lambda x: '/dev/sda1' if 'foo' in x else x)
+        result = system.device_is_mounted('/dev/mapper/foo', destination='/far/lib/ceph/osd/ceph-0')
+ assert result is True
+
+ def test_is_realpath_path_mounted_at_destination(self, fake_proc, monkeypatch):
+ monkeypatch.setattr(
+ system.os.path, 'realpath',
+ lambda x: '/far/lib/ceph/osd/ceph-0' if 'symlink' in x else x)
+ result = system.device_is_mounted('/dev/sda1', destination='/symlink/lib/ceph/osd/ceph-0')
+ assert result is True
+
+
+class TestGetMounts(object):
+
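+    # get_mounts() returns a dict mapping each device (or pseudo filesystem
+    # name) to the list of paths it is mounted on, skipping malformed lines
+    # and devices that do not exist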
+ def test_not_mounted(self, tmpdir, monkeypatch):
+ PROCDIR = str(tmpdir)
+ proc_path = os.path.join(PROCDIR, 'mounts')
+ with open(proc_path, 'w') as f:
+ f.write('')
+ monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
+ assert system.get_mounts() == {}
+
+    def test_is_mounted(self, fake_proc):
+ result = system.get_mounts()
+ assert result['/dev/sdc2'] == ['/boot']
+
+ def test_ignores_two_fields(self, fake_proc):
+ result = system.get_mounts()
+ assert result.get('/dev/sde4') is None
+
+ def test_tmpfs_is_reported(self, fake_proc):
+ result = system.get_mounts()
+ assert result['tmpfs'][0] == '/dev/shm'
+
+ def test_non_skip_devs_arent_reported(self, fake_proc):
+ result = system.get_mounts()
+ assert result.get('cgroup') is None
+
+ def test_multiple_mounts_are_appended(self, fake_proc):
+ result = system.get_mounts()
+ assert len(result['tmpfs']) == 7
+
+ def test_nonexistent_devices_are_skipped(self, tmpdir, monkeypatch):
+ PROCDIR = str(tmpdir)
+ proc_path = os.path.join(PROCDIR, 'mounts')
+ with open(proc_path, 'w') as f:
+ f.write(dedent("""nfsd /proc/fs/nfsd nfsd rw,relatime 0 0
+ /dev/sda1 /far/lib/ceph/osd/ceph-0 xfs rw,attr2,inode64,noquota 0 0
+ /dev/sda2 /far/lib/ceph/osd/ceph-1 xfs rw,attr2,inode64,noquota 0 0"""))
+ monkeypatch.setattr(system, 'PROCDIR', PROCDIR)
+ monkeypatch.setattr(os.path, 'exists', lambda x: False if x == '/dev/sda1' else True)
+ result = system.get_mounts()
+ assert result.get('/dev/sda1') is None
+
+
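+# is_binary() presumably treats any NUL byte in the file contents as the
+# binary marker; a rough sketch of that assumption:
+#
+#   def is_binary(path):
+#       with open(path, 'rb') as f:
+#           return b'\x00' in f.read()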
+class TestIsBinary(object):
+
+ def test_is_binary(self, tmpfile):
+ binary_path = tmpfile(contents='asd\n\nlkjh\x00')
+ assert system.is_binary(binary_path)
+
+ def test_is_not_binary(self, tmpfile):
+ binary_path = tmpfile(contents='asd\n\nlkjh0')
+ assert system.is_binary(binary_path) is False
+
+
+class TestGetFileContents(object):
+
+ def test_path_does_not_exist(self, tmpdir):
+ filepath = os.path.join(str(tmpdir), 'doesnotexist')
+ assert system.get_file_contents(filepath, 'default') == 'default'
+
+ def test_path_has_contents(self, tmpfile):
+ interesting_file = tmpfile(contents="1")
+ result = system.get_file_contents(interesting_file)
+ assert result == "1"
+
+ def test_path_has_multiline_contents(self, tmpfile):
+ interesting_file = tmpfile(contents="0\n1")
+ result = system.get_file_contents(interesting_file)
+ assert result == "0\n1"
+
+ def test_exception_returns_default(self, tmpfile):
+ interesting_file = tmpfile(contents="0")
+        # remove read permissions, which causes an IOError
+ os.chmod(interesting_file, 0o000)
+ result = system.get_file_contents(interesting_file)
+ assert result == ''
+
+
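+# a rough sketch of the lookup these tests assume (illustrative only; the
+# real helper is ceph_volume.util.system.which):
+#
+#   def which(executable):
+#       locations = os.getenv('PATH', '').split(':') + [
+#           '/usr/local/bin', '/bin', '/usr/bin',
+#           '/usr/local/sbin', '/usr/sbin', '/sbin']
+#       for location in locations:
+#           candidate = os.path.join(location, executable)
+#           if os.path.exists(candidate) and os.path.isfile(candidate):
+#               return candidate
+#       # fall back to the bare name, warning on stderr that it was not found
+#       return executable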
+class TestWhich(object):
+
+ def test_executable_exists_but_is_not_file(self, monkeypatch):
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: False)
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: True)
+ assert system.which('exedir') == 'exedir'
+
+ def test_executable_does_not_exist(self, monkeypatch):
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: False)
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: False)
+ assert system.which('exedir') == 'exedir'
+
+ def test_executable_exists_as_file(self, monkeypatch):
+ monkeypatch.setattr(system.os, 'getenv', lambda x, y: '')
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: x != 'ceph')
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: x != 'ceph')
+ assert system.which('ceph') == '/usr/local/bin/ceph'
+
+ def test_warnings_when_executable_isnt_matched(self, monkeypatch, capsys):
+ monkeypatch.setattr(system.os.path, 'isfile', lambda x: True)
+ monkeypatch.setattr(system.os.path, 'exists', lambda x: False)
+ system.which('exedir')
+ cap = capsys.readouterr()
+ assert 'Executable exedir not in PATH' in cap.err
+
+
+@pytest.fixture
+def stub_which(monkeypatch):
+ def apply(value='/bin/restorecon'):
+ monkeypatch.setattr(system, 'which', lambda x: value)
+ return apply
+
+
+# python2 has no FileNotFoundError
+try:
+ FileNotFoundError
+except NameError:
+ FileNotFoundError = OSError
+
+
+class TestSetContext(object):
+
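+    # set_context() short-circuits when CEPH_VOLUME_SKIP_RESTORECON is truthy,
+    # when restorecon does not resolve to an absolute path, or when
+    # selinuxenabled reports SELinux as unavailable; only then does it invoke
+    # restorecon (fake_run records those invocations)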
+ def setup(self):
+ try:
+ os.environ.pop('CEPH_VOLUME_SKIP_RESTORECON')
+ except KeyError:
+ pass
+
+ @pytest.mark.parametrize('value', ['1', 'True', 'true', 'TRUE', 'yes'])
+ def test_set_context_skips(self, stub_call, fake_run, value):
+ stub_call(('', '', 0))
+ os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
+
+ @pytest.mark.parametrize('value', ['0', 'False', 'false', 'FALSE', 'no'])
+ def test_set_context_doesnt_skip_with_env(self, stub_call, stub_which, fake_run, value):
+ stub_call(('', '', 0))
+ stub_which()
+ os.environ['CEPH_VOLUME_SKIP_RESTORECON'] = value
+ system.set_context('/tmp/foo')
+ assert len(fake_run.calls)
+
+ def test_set_context_skips_on_executable(self, stub_call, stub_which, fake_run):
+ stub_call(('', '', 0))
+ stub_which('restorecon')
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
+
+ def test_set_context_no_skip_on_executable(self, stub_call, stub_which, fake_run):
+ stub_call(('', '', 0))
+ stub_which('/bin/restorecon')
+ system.set_context('/tmp/foo')
+ assert len(fake_run.calls)
+
+ @patch('ceph_volume.process.call')
+ def test_selinuxenabled_doesnt_exist(self, mocked_call, fake_run):
+ mocked_call.side_effect = FileNotFoundError()
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
+
+ def test_selinuxenabled_is_not_enabled(self, stub_call, fake_run):
+ stub_call(('', '', 1))
+ system.set_context('/tmp/foo')
+ assert fake_run.calls == []
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_util.py b/src/ceph-volume/ceph_volume/tests/util/test_util.py
new file mode 100644
index 00000000..1a094d33
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/util/test_util.py
@@ -0,0 +1,116 @@
+import pytest
+from ceph_volume import util
+
+
+class TestAsBytes(object):
+
+ def test_bytes_just_gets_returned(self):
+ bytes_string = "contents".encode('utf-8')
+ assert util.as_bytes(bytes_string) == bytes_string
+
+ def test_string_gets_converted_to_bytes(self):
+ result = util.as_bytes('contents')
+ assert isinstance(result, bytes)
+
+
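+# a sketch of the conversion these tests assume (illustrative only; note the
+# comma-as-decimal-separator handling, and that math would need importing):
+#
+#   def str_to_int(string, round_down=True):
+#       error_msg = "Unable to convert to integer: '%s'" % str(string)
+#       try:
+#           number = float(str(string).replace(',', '.'))
+#       except (TypeError, ValueError):
+#           raise RuntimeError(error_msg)
+#       return int(math.floor(number)) if round_down else int(math.ceil(number))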
+class TestStrToInt(object):
+
+ def test_passing_a_float_str_comma(self):
+ result = util.str_to_int("1,99")
+ assert result == 1
+
+ def test_passing_a_float_does_not_round_comma(self):
+ result = util.str_to_int("1,99", round_down=False)
+ assert result == 2
+
+ @pytest.mark.parametrize("value", ['2', 2])
+ def test_passing_an_int(self, value):
+ result = util.str_to_int(value)
+ assert result == 2
+
+ @pytest.mark.parametrize("value", ['1.99', 1.99])
+ def test_passing_a_float(self, value):
+ result = util.str_to_int(value)
+ assert result == 1
+
+ @pytest.mark.parametrize("value", ['1.99', 1.99])
+ def test_passing_a_float_does_not_round(self, value):
+ result = util.str_to_int(value, round_down=False)
+ assert result == 2
+
+ def test_text_is_not_an_integer_like(self):
+ with pytest.raises(RuntimeError) as error:
+ util.str_to_int("1.4GB")
+ assert str(error.value) == "Unable to convert to integer: '1.4GB'"
+
+ def test_input_is_not_string(self):
+ with pytest.raises(RuntimeError) as error:
+ util.str_to_int(None)
+ assert str(error.value) == "Unable to convert to integer: 'None'"
+
+
+def true_responses(upper_casing=False):
+ if upper_casing:
+ return ['Y', 'YES', '']
+ return ['y', 'yes', '']
+
+
+def false_responses(upper_casing=False):
+ if upper_casing:
+ return ['N', 'NO']
+ return ['n', 'no']
+
+
+def invalid_responses():
+ return [9, 0.1, 'h', [], {}, None]
+
+
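+# a sketch of the mapping these tests assume (illustrative only; an empty
+# response counts as "yes", matching true_responses() above):
+#
+#   def str_to_bool(val):
+#       true_vals = ['yes', 'y', '']
+#       false_vals = ['no', 'n']
+#       try:
+#           val = val.lower()
+#       except AttributeError:
+#           val = str(val).lower()
+#       if val in true_vals:
+#           return True
+#       if val in false_vals:
+#           return False
+#       raise ValueError("Expected one of %s, got: %s" % (
+#           true_vals + false_vals, val))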
+class TestStrToBool(object):
+
+ @pytest.mark.parametrize('response', true_responses())
+ def test_trueish(self, response):
+ assert util.str_to_bool(response) is True
+
+ @pytest.mark.parametrize('response', false_responses())
+ def test_falseish(self, response):
+ assert util.str_to_bool(response) is False
+
+ @pytest.mark.parametrize('response', true_responses(True))
+ def test_trueish_upper(self, response):
+ assert util.str_to_bool(response) is True
+
+ @pytest.mark.parametrize('response', false_responses(True))
+ def test_falseish_upper(self, response):
+ assert util.str_to_bool(response) is False
+
+ @pytest.mark.parametrize('response', invalid_responses())
+ def test_invalid(self, response):
+ with pytest.raises(ValueError):
+ util.str_to_bool(response)
+
+
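+# prompt_bool() takes an ``input_`` callable as a test seam and keeps
+# re-prompting until the response maps cleanly through str_to_bool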
+class TestPromptBool(object):
+
+ @pytest.mark.parametrize('response', true_responses())
+ def test_trueish(self, response):
+ fake_input = lambda x: response
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is True
+
+ @pytest.mark.parametrize('response', false_responses())
+ def test_falseish(self, response):
+ fake_input = lambda x: response
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is False
+
+ def test_try_again_true(self):
+ responses = ['g', 'h', 'y']
+ fake_input = lambda x: responses.pop(0)
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is True
+
+ def test_try_again_false(self):
+ responses = ['g', 'h', 'n']
+ fake_input = lambda x: responses.pop(0)
+ qx = 'what the what?'
+ assert util.prompt_bool(qx, input_=fake_input) is False