# path: root/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
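
    # with both daemons stopped, the monitors can mark the OSDs down and
    # destroy them, freeing osd ids 0 and 2 for re-use further below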


- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded
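
    # "osd destroy" removes the OSD's cephx key and marks it destroyed in the
    # osdmap while keeping the id allocated, which is what lets the redeploy
    # and prepare tasks below re-use ids 2 and 0 via --osd-id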


- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1
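
    # CEPH_VOLUME_DEBUG=1 (set on every ceph-volume task in this play) makes
    # ceph-volume print full tracebacks on failure instead of terse errors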

    # the partition has been completely removed by the zap, so re-create it
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1
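
    # "lvm create" is prepare + activate in a single step, so osd.2 comes
    # straight back up after this task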

    # osd.0 device (zap without --destroy, which would also remove the LV)
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1
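
    # "lvm prepare" stops short of activation, so osd.0 stays down until the
    # activate --all task at the end of this play; that gap is what allows the
    # ownership changes below to run first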

    - name: find all OSD directories
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_directories

    - name: find all OSD symlinks
      find:
        paths: /var/lib/ceph/osd
        recurse: yes
        depth: 2
        file_type: link
      register: osd_symlinks

    # set the OSD dir and the block/block.db links to root:root ownership, to
    # verify that the OSD can still activate regardless of prior ownership
    - name: set OSD directory ownership to root:root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_directories.files }}"

    - name: set OSD symlink ownership to root:root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
      with_items:
        - "{{ osd_symlinks.files }}"

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --all"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1
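
    # inventory and list don't assert specific output here; they act as smoke
    # tests that both commands exit cleanly against the redeployed OSDs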

    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file
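
    # fallocate allocates the 1G file without writing its blocks; attached to
    # the first free loop device (losetup -f), it serves as a disposable block
    # device for exercising zap --destroy without touching the real OSD disks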

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false
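
    # failed_when: false on the vg/lv creation tasks keeps re-runs of this play
    # from failing when test_zap and its lvs are left over from a previous run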

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1
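
    # (illustrative sketch, not part of the original test) detaching the loop
    # device afterwards would let the sparse-file setup above start clean on a
    # re-run; hedged with failed_when in case the device is already detached
    - name: detach loop device (illustrative cleanup sketch)
      command: losetup -d {{ losetup_list.stdout }}
      failed_when: false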