# src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml

- hosts: osds
  become: yes
  tasks:

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped
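
    # ceph-osd@<id> is a systemd template unit, so each OSD id gets its own
    # service instance; the two tasks above are roughly equivalent to:
    #   systemctl stop ceph-osd@2 ceph-osd@0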


- hosts: mons
  become: yes
  tasks:
    - name: mark osds down
      command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
      with_items:
        - 0
        - 2
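
    # for reference, each loop iteration runs the CLI equivalent of e.g.:
    #   ceph --cluster ceph osd down osd.0
    # (assuming the default cluster name "ceph")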

    - name: destroy osd.2
      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded

    - name: destroy osd.0
      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
      register: result
      retries: 30
      delay: 1
      until: result is succeeded
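
    # retries/until re-runs the destroy command (up to 30 times, one second
    # apart) until it exits cleanly, since the monitors may not yet have
    # registered the OSDs as down when the first attempt is made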

- hosts: osds
  become: yes
  tasks:

    # osd.2 device
    - name: zap /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.2 journal
    - name: zap /dev/vdd2
      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd2 --destroy"
      environment:
        CEPH_VOLUME_DEBUG: 1
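
    # per the ceph-volume docs, zap wipes device signatures (wipefs) and
    # zeroes the start of the device with dd; --destroy additionally removes
    # the partition itself (or the LV/VG when zapping a logical volume)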

    # the partitions were completely removed by zap --destroy, so re-create them
    - name: re-create partition /dev/vdd for lvm data usage
      parted:
        device: /dev/vdd
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        label: gpt
        state: present

    - name: re-create partition /dev/vdd for lvm journal usage
      parted:
        device: /dev/vdd
        number: 2
        part_start: 50%
        part_end: 100%
        unit: '%'
        state: present
        label: gpt
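
    # the two parted tasks above are roughly equivalent to:
    #   parted -s /dev/vdd mklabel gpt
    #   parted -s /dev/vdd mkpart primary 0% 50%
    #   parted -s /dev/vdd mkpart primary 50% 100%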

    - name: redeploy osd.2 using /dev/vdd1
      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/vdd1 --journal /dev/vdd2 --osd-id 2"
      environment:
        CEPH_VOLUME_DEBUG: 1
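
    # "lvm create" is prepare + activate in one step, so osd.2 comes back up
    # immediately; --osd-id 2 reuses the id freed by "osd destroy" above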

    # osd.0 data lv
    # note: --destroy is intentionally omitted here to verify that zapping an
    # LV works without the flag; the bluestore tests exercise --destroy
    - name: zap test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    # osd.0 journal device
    - name: zap /dev/vdc1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy /dev/vdc1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: re-create partition /dev/vdc1
      parted:
        device: /dev/vdc
        number: 1
        part_start: 0%
        part_end: 50%
        unit: '%'
        state: present
        label: gpt

    - name: prepare osd.0 again using test_group/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/vdc1 --osd-id 0"
      environment:
        CEPH_VOLUME_DEBUG: 1
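
    # unlike "lvm create" above, "lvm prepare" stops short of activation, so
    # osd.0 remains down until the "activate --all" task further below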

    - name: find all OSD paths
      find:
        paths: /var/lib/ceph/osd
        recurse: no
        file_type: directory
      register: osd_paths

    # set all OSD paths to root:root to ensure that the OSDs will be able to
    # activate regardless of ownership
    - name: mangle permissions to root
      file:
        path: "{{ item.path }}"
        owner: root
        group: root
        recurse: yes
      with_items:
        - "{{ osd_paths.files }}"

    - name: stop ceph-osd@2 daemon
      service:
        name: ceph-osd@2
        state: stopped

    - name: stop ceph-osd@0 daemon
      service:
        name: ceph-osd@0
        state: stopped

    - name: activate all to start the previously prepared osd.0
      command: "ceph-volume lvm activate --filestore --all"
      environment:
        CEPH_VOLUME_DEBUG: 1
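
    # activate discovers prepared OSDs through their LVM tags; a single OSD
    # could also be activated by id and uuid, e.g. (hypothetical values):
    #   ceph-volume lvm activate --filestore 0 <osd-fsid>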

    - name: node inventory
      command: "ceph-volume inventory"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: list all OSDs
      command: "ceph-volume lvm list"
      environment:
        CEPH_VOLUME_DEBUG: 1
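
    # "lvm list" also supports machine-readable output if needed:
    #   ceph-volume lvm list --format json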

    - name: create temporary directory
      tempfile:
        state: directory
        suffix: sparse
      register: tmpdir

    - name: create a 1GB sparse file
      command: fallocate -l 1G {{ tmpdir.path }}/sparse.file

    - name: find an empty loop device
      command: losetup -f
      register: losetup_list

    - name: setup loop device with sparse file
      command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file
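
    # the sparse file plus losetup gives a disposable block device to back
    # the test_zap volume group without touching the OSD disks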

    - name: create volume group
      command: vgcreate test_zap {{ losetup_list.stdout }}
      failed_when: false

    - name: create logical volume 1
      command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap
      failed_when: false

    - name: create logical volume 2
      command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap
      failed_when: false
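
    # failed_when: false lets the vgcreate/lvcreate tasks pass when the vg
    # and lvs already exist from a previous run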

    # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
    - name: zap test_zap/data-lv1
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
      environment:
        CEPH_VOLUME_DEBUG: 1

    - name: zap test_zap/data-lv2
      command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
      environment:
        CEPH_VOLUME_DEBUG: 1