From 19fcec84d8d7d21e796c7624e521b60d28ee21ed Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:45:59 +0200
Subject: Adding upstream version 16.2.11+ds.

Signed-off-by: Daniel Baumann
---
 .../lvm/centos8/filestore/dmcrypt/test.yml | 120 +++++++++++++++++++++
 1 file changed, 120 insertions(+)
 create mode 100644 src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml

(limited to 'src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml')

diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml
new file mode 100644
index 000000000..21eff00fa
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/filestore/dmcrypt/test.yml
@@ -0,0 +1,120 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+    - name: mark osds down
+      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+      with_items:
+        - 0
+        - 2
+
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+      register: result
+      retries: 30
+      delay: 1
+      until: result is succeeded
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+      register: result
+      retries: 30
+      delay: 1
+      until: result is succeeded
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
+    - name: zap /dev/vdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/vdd2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/vdd for lvm data usage
+      parted:
+        device: /dev/vdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: re-create partition /dev/vdd lvm journals
+      parted:
+        device: /dev/vdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
+    - name: redeploy osd.2 using /dev/vdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 lv
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/vdc1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdc1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: re-create partition /dev/vdc1
+      parted:
+        device: /dev/vdc
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        state: present
+        label: gpt
+
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --filestore --all"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: node inventory
+      command: "ceph-volume inventory"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
--
cgit v1.2.3