Diffstat (limited to 'src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks')
 src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml |  27 +
 src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml   | 148 +
 src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml   | 169 +
3 files changed, 344 insertions, 0 deletions
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
new file mode 100644
index 00000000..37a48949
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml
@@ -0,0 +1,27 @@
+---
+
+- hosts: osds
+  gather_facts: false
+  become: yes
+  tasks:
+
+    - name: partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: partition /dev/sdd lvm journals
+      parted:
+        device: /dev/sdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
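The playbook above splits /dev/sdd into two GPT partitions at the 50% mark: partition 1 for LVM data, partition 2 for journals. As a side note (not part of the diff): the parted module gathers partition facts without making changes when run with only the device argument, so a hypothetical sanity check could look like this:

    - name: gather partition facts for /dev/sdd
      parted:
        device: /dev/sdd
        unit: '%'
      register: sdd_facts

    - name: assert that both halves of /dev/sdd were created
      assert:
        that:
          - sdd_facts.partitions | length == 2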
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
new file mode 100644
index 00000000..1e9b8c3e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
@@ -0,0 +1,148 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
+    - name: zap /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 device (zapped without --destroy, the flag that would remove the LV)
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: find all OSD directories
+      find:
+        paths: /var/lib/ceph/osd
+        recurse: no
+        file_type: directory
+      register: osd_directories
+
+    - name: find all OSD symlinks
+      find:
+        paths: /var/lib/ceph/osd
+        recurse: yes
+        depth: 2
+        file_type: link
+      register: osd_symlinks
+
+    # set the OSD dir and the block/block.db links to root:root permissions, to
+    # ensure that the OSD will be able to activate regardless
+    - file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+      with_items:
+        - "{{ osd_directories.files }}"
+
+    - file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+      with_items:
+        - "{{ osd_symlinks.files }}"
+
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --all"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: node inventory
+      command: "ceph-volume inventory"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: create temporary directory
+      tempfile:
+        state: directory
+        suffix: sparse
+      register: tmpdir
+
+    - name: create a 5GB sparse file
+      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file
+
+    - name: find an empty loop device
+      command: losetup -f
+      register: losetup_list
+
+    - name: setup loop device with sparse file
+      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+    - name: create volume group
+      command: vgcreate test_zap {{ losetup_list.stdout }}
+      failed_when: false
+
+    - name: create logical volume 1
+      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+      failed_when: false
+
+    - name: create logical volume 2
+      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+      failed_when: false
+
+    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+    - name: zap test_zap/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap test_zap/data-lv2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
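The bluestore playbook exercises both zap modes: /dev/sdd1 is zapped with --destroy, which deletes the partition itself (hence the re-create step), while test_group/data-lv1 is zapped without --destroy, so the logical volume survives and can be reused by lvm prepare --osd-id 0. A hypothetical follow-up task (not in the diff) to confirm the LV is still present after the zap:

    - name: confirm test_group/data-lv1 still exists after zap
      command: lvs --noheadings -o lv_name test_group/data-lv1
      changed_when: false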
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
new file mode 100644
index 00000000..4e43839e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml
@@ -0,0 +1,169 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
+    - name: zap /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.2 journal
+    - name: zap /dev/sdd2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: re-create partition /dev/sdd lvm journals
+      parted:
+        device: /dev/sdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 data lv
+    # note: we don't use --destroy here, to test that zapping works without that flag;
+    # --destroy is used in the bluestore tests
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 journal device (zapped without --destroy, so the partition is kept)
+    - name: zap /dev/sdc1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: find all OSD paths
+      find:
+        paths: /var/lib/ceph/osd
+        recurse: no
+        file_type: directory
+      register: osd_paths
+
+    # set all OSD paths to root:root to ensure that the OSD will be able to
+    # activate regardless
+    - name: mangle permissions to root
+      file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+        recurse: yes
+      with_items:
+        - "{{ osd_paths.files }}"
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@1 daemon
+      service:
+        name: ceph-osd@1
+        state: stopped
+
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --filestore --all"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: node inventory
+      command: "ceph-volume inventory"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: create temporary directory
+      tempfile:
+        state: directory
+        suffix: sparse
+      register: tmpdir
+
+    - name: create a 5GB sparse file
+      command: fallocate -l 5G {{ tmpdir.path }}/sparse.file
+
+    - name: find an empty loop device
+      command: losetup -f
+      register: losetup_list
+
+    - name: setup loop device with sparse file
+      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
+    - name: create volume group
+      command: vgcreate test_zap {{ losetup_list.stdout }}
+      failed_when: false
+
+    - name: create logical volume 1
+      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+      failed_when: false
+
+    - name: create logical volume 2
+      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+      failed_when: false
+
+    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+    - name: zap test_zap/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap test_zap/data-lv2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
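Both test playbooks end with the same fixture: a throwaway volume group (test_zap) built on a loop device backed by a sparse file, used to verify that zapping one LV with --destroy removes only that LV and leaves the VG intact for the second zap. The diff does not tear this fixture down; a hypothetical cleanup, assuming the losetup_list and tmpdir registers from the tasks above, might be:

    - name: detach the loop device
      command: losetup -d {{ losetup_list.stdout }}
      failed_when: false

    - name: remove the temporary sparse file directory
      file:
        path: "{{ tmpdir.path }}"
        state: absent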