 src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml | 161 +++++++++++++++++++
 1 file changed, 161 insertions(+)
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
new file mode 100644
index 000000000..97d77a7f4
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
@@ -0,0 +1,161 @@
+
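+# Functional test for ceph-volume's bluestore lvm workflow: stop two OSDs,
+# destroy them on the mons, zap their devices (with and without --destroy),
+# then redeploy and re-activate them under the same OSD ids.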
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
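+# With the daemons stopped, mark both OSDs down and destroy them so their ids
+# (0 and 2) can be reused via --osd-id below. The destroy commands are
+# retried (30 x 1s) because the down state may take a moment to settle on
+# the monitors.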
+- hosts: mons
+  become: yes
+  tasks:
+    - name: mark osds down
+      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+      with_items:
+        - 0
+        - 2
+
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+      register: result
+      retries: 30
+      delay: 1
+      until: result is succeeded
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+      register: result
+      retries: 30
+      delay: 1
+      until: result is succeeded
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
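+    # note: `lvm zap --destroy` on a raw partition wipes the data and removes
+    # the partition itself, which is why /dev/vdd1 must be re-created below.
+    # CEPH_VOLUME_DEBUG=1 makes ceph-volume emit verbose debug output.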
+    - name: zap /dev/vdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # the partition was completely removed by the zap above, so re-create it
+    - name: re-create partition /dev/vdd for lvm data usage
+      parted:
+        device: /dev/vdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
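+    # `lvm create` runs prepare + activate in one call; --osd-id 2 reuses the
+    # ID freed by the earlier `osd destroy`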
+    - name: redeploy osd.2 using /dev/vdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 device (zap without --destroy, which would remove the LV)
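+    # the LV itself survives this zap: only its contents are wiped, so it can
+    # be handed straight back to `lvm prepare` below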
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: find all OSD directories
+      find:
+        paths: /var/lib/ceph/osd
+        recurse: no
+        file_type: directory
+      register: osd_directories
+
+    - name: find all OSD symlinks
+      find:
+        paths: /var/lib/ceph/osd
+        recurse: yes
+        depth: 2
+        file_type: link
+      register: osd_symlinks
+
+    # set the OSD directories and the block/block.db symlinks to root:root
+    # ownership, to ensure that the OSD can activate regardless of prior owner
+    - file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+      with_items:
+        - "{{ osd_directories.files }}"
+
+    - file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+      with_items:
+        - "{{ osd_symlinks.files }}"
+
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --all"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
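+    # smoke-test ceph-volume's reporting commands on the redeployed node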
+    - name: node inventory
+      command: "ceph-volume inventory"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
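+    # build a throwaway VG on top of a loop device so zap --destroy can be
+    # exercised against plain LVs without touching the real OSD devices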
+    - name: create temporary directory
+      tempfile:
+        state: directory
+        suffix: sparse
+      register: tmpdir
+
+    - name: create a 1GB sparse file
+      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
+
+    - name: find an empty loop device
+      command: losetup -f
+      register: losetup_list
+
+    - name: setup loop device with sparse file
+      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
+
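+    # failed_when: false tolerates leftovers from a previous run, since the
+    # VG or LVs may already exist on the loop device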
+    - name: create volume group
+      command: vgcreate test_zap {{ losetup_list.stdout }}
+      failed_when: false
+
+    - name: create logical volume 1
+      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
+      failed_when: false
+
+    - name: create logical volume 2
+      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
+      failed_when: false
+
+    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
+    - name: zap test_zap/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap test_zap/data-lv2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1