From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 21 Apr 2024 13:54:28 +0200 Subject: Adding upstream version 18.2.2. Signed-off-by: Daniel Baumann --- .../lvm/centos8/bluestore/create/Vagrantfile | 1 + .../lvm/centos8/bluestore/create/group_vars/all | 1 + .../functional/lvm/centos8/bluestore/create/hosts | 8 + .../lvm/centos8/bluestore/create/setup.yml | 1 + .../lvm/centos8/bluestore/create/test.yml | 1 + .../centos8/bluestore/create/vagrant_variables.yml | 1 + .../lvm/centos8/bluestore/dmcrypt/Vagrantfile | 1 + .../lvm/centos8/bluestore/dmcrypt/group_vars/all | 1 + .../functional/lvm/centos8/bluestore/dmcrypt/hosts | 8 + .../lvm/centos8/bluestore/dmcrypt/setup.yml | 1 + .../lvm/centos8/bluestore/dmcrypt/test.yml | 123 ++++++++++++++++ .../bluestore/dmcrypt/vagrant_variables.yml | 1 + .../functional/lvm/playbooks/setup_partitions.yml | 27 ++++ .../functional/lvm/playbooks/test_bluestore.yml | 161 +++++++++++++++++++++ .../ceph_volume/tests/functional/lvm/tox.ini | 71 +++++++++ 15 files changed, 407 insertions(+) create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all create mode 100644 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all create mode 100644 
src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml create mode 100644 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml create mode 120000 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml create mode 100644 src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml create mode 100644 src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml create mode 100644 src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini (limited to 'src/ceph-volume/ceph_volume/tests/functional/lvm') diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile new file mode 120000 index 000000000..16076e424 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all new file mode 120000 index 000000000..5a7af3be0 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all @@ -0,0 +1 @@ +../../../../../group_vars/bluestore_lvm \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts new file mode 100644 index 000000000..e1c1de6f8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git 
a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml new file mode 120000 index 000000000..1c1a3ce8d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml new file mode 120000 index 000000000..165d9da29 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_bluestore.yml \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml new file mode 120000 index 000000000..d21531f6c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml @@ -0,0 +1 @@ +../../../../vagrant_variables.yml \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile new file mode 120000 index 000000000..16076e424 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all new file mode 120000 index 000000000..6ef6a9844 --- /dev/null +++ 
b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all @@ -0,0 +1 @@ +../../../../../group_vars/bluestore_lvm_dmcrypt \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts new file mode 100644 index 000000000..e1c1de6f8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml new file mode 120000 index 000000000..1c1a3ce8d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml new file mode 100644 index 000000000..0a47b5eb8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml @@ -0,0 +1,123 @@ +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + +- hosts: mons + become: yes + tasks: + - name: mark osds down + command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}" + with_items: + - 0 + - 2 + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + register: result + retries: 30 + delay: 1 + until: result is succeeded + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + 
register: result + retries: 30 + delay: 1 + until: result is succeeded + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/vdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/vdd for lvm data usage + parted: + device: /dev/vdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/vdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: redeploy osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + - name: mark osds down + command: "ceph --cluster {{ cluster }} osd down osd.0" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + register: result + retries: 30 + delay: 1 + until: result is succeeded + + +- hosts: osds + become: yes + tasks: + + + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - 
name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml new file mode 120000 index 000000000..d21531f6c --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1 @@ +../../../../vagrant_variables.yml \ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml new file mode 100644 index 000000000..4b9e6638e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml @@ -0,0 +1,27 @@ +--- + +- hosts: osds + gather_facts: false + become: yes + tasks: + + - name: partition /dev/vdd for lvm data usage + parted: + device: /dev/vdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: partition /dev/vdd lvm journals + parted: + device: /dev/vdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml new file mode 100644 index 000000000..97d77a7f4 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml @@ -0,0 +1,161 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + - name: mark osds down + command: 
"ceph --cluster {{ cluster }} osd down osd.{{ item }}" + with_items: + - 0 + - 2 + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + register: result + retries: 30 + delay: 1 + until: result is succeeded + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + register: result + retries: 30 + delay: 1 + until: result is succeeded + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/vdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/vdd for lvm data usage + parted: + device: /dev/vdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/vdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 device (zap without --destroy that removes the LV) + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: find all OSD directories + find: + paths: /var/lib/ceph/osd + recurse: no + file_type: directory + register: osd_directories + + - name: find all OSD symlinks + find: + paths: /var/lib/ceph/osd + recurse: yes + depth: 2 + file_type: link + register: osd_symlinks + + # set the OSD dir and the block/block.db links to root:root permissions, to + # ensure that the OSD will be able to activate regardless + - file: + path: "{{ item.path }}" + owner: root + group: root + with_items: + - "{{ 
osd_directories.files }}" + + - file: + path: "{{ item.path }}" + owner: root + group: root + with_items: + - "{{ osd_symlinks.files }}" + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: create temporary directory + tempfile: + state: directory + suffix: sparse + register: tmpdir + + - name: create a 1GB sparse file + command: fallocate -l 1G {{ tmpdir.path }}/sparse.file + + - name: find an empty loop device + command: losetup -f + register: losetup_list + + - name: setup loop device with sparse file + command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file + + - name: create volume group + command: vgcreate test_zap {{ losetup_list.stdout }} + failed_when: false + + - name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap + failed_when: false + + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap + failed_when: false + + # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed + - name: zap test_zap/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap test_zap/data-lv2 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini new file mode 100644 index 000000000..49c969059 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini @@ -0,0 +1,71 @@ +[tox] +envlist = centos8-bluestore-{create,prepare_activate,dmcrypt} +skipsdist = True + +[testenv] 
+deps = mock +allowlist_externals = + vagrant + bash + git + cp + sleep +passenv=* +setenv= + ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey + ANSIBLE_STDOUT_CALLBACK = debug + VAGRANT_CWD = {changedir} + CEPH_VOLUME_DEBUG = 1 + DEBIAN_FRONTEND=noninteractive +changedir= + # plain/unencrypted + centos8-bluestore-create: {toxinidir}/centos8/bluestore/create + # dmcrypt + centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt + # TODO: these are placeholders for now, eventually we want to + # test the prepare/activate workflow of ceph-volume as well + centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate +commands= + git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible + pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt + ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v + + bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} + + # create logical volumes to test with on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml + + ansible -vv -i {changedir}/hosts all -b -m package -a 'name=rpm state=latest' + + # ad-hoc/local test setup for lvm + ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml + + cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible + + # use ceph-ansible to deploy a ceph cluster on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}" + + # 
prepare nodes for testing with testinfra + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml + + # test cluster state using testinfra + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # reboot all vms - attempt + bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + + # after a reboot, osds may take about 20 seconds to come back up + sleep 30 + + # retest to ensure cluster came back up correctly after rebooting + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # destroy an OSD, zap its device and recreate it using its ID + ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml + + # retest to ensure cluster came back up correctly + py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"} -- cgit v1.2.3