meta:
- desc: |
Run ceph on two nodes, using one of them as a client,
with a separate client-only node.
Use xfs beneath the osds.
   Install ceph/reef v18.2.2, then upgrade through each subsequent v18.2.x
   point release (every point release should be tested), running the workload
   and the upgrade-sequence in parallel at each step.
   Finally install the latest ceph/reef version and run the workload and
   upgrade-sequence in parallel once more.
   Overall upgrade path is: reef-latest.point-1 => reef-latest.point => reef-latest
overrides:
ceph:
log-ignorelist:
- reached quota
- scrub
- osd_map_max_advance
- wrongly marked
- FS_DEGRADED
- POOL_APP_NOT_ENABLED
- CACHE_POOL_NO_HIT_SET
- POOL_FULL
- SMALLER_PG
- pool\(s\) full
- OSD_DOWN
- missing hit_sets
- CACHE_POOL_NEAR_FULL
- PG_AVAILABILITY
- PG_DEGRADED
- application not enabled
- cache pools at or near target size
- filesystem is degraded
- OBJECT_MISPLACED
    ### ref: https://tracker.ceph.com/issues/40251
    # "failed to encode map" was removed from this list; see the tracker ref above
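    # The entries above are log/health messages expected while the cluster is
    # mid-upgrade; the ceph task treats them as regexes and ignores matching
    # lines when scanning cluster logs for failures.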
fs: xfs
conf:
global:
mon_warn_on_pool_no_app: false
mon_mds_skip_sanity: true
mon:
mon debug unsafe allow tier with nonempty snaps: true
osd:
osd map max advance: 1000
osd_class_default_list: "*"
osd_class_load_list: "*"
client:
rgw_crypt_require_ssl: false
rgw crypt s3 kms backend: testing
rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
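# The three role groups below map to the nodes described in meta: node 1 runs
# mon.a, mds.a, mgr.x and three osds; node 2 runs mon.b, mon.c, three osds and
# client.0; node 3 is the separate client-only node (client.1).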
roles:
- - mon.a
- mds.a
- osd.0
- osd.1
- osd.2
- mgr.x
- - mon.b
- mon.c
- osd.3
- osd.4
- osd.5
- client.0
- - client.1
openstack:
- volumes: # attached to each instance
count: 3
size: 30 # GB
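# Task flow: install the pinned v18.2.2 build, deploy the cluster on xfs, run
# the initial workload, then upgrade through the (currently commented-out)
# v18.2.x point releases and finally to the latest reef build, running
# workload_reef and upgrade-sequence_reef in parallel at each upgrade step.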
tasks:
- print: "**** done reef, about to install v18.2.2"
# See https://tracker.ceph.com/issues/66505. Versions < v18.2.2 contain the crc bug.
- install:
tag: v18.2.2
    # The line below is left over from the jewel test and can be removed.
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v18.2.2 install"
- ceph:
fs: xfs
add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
- workload
- print: "**** done workload v18.2.2"
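# "workload" above (and workload_reef / upgrade-sequence_reef below) are not
# tasks themselves: teuthology's sequential and parallel tasks resolve these
# names against the top-level config, i.e. the sections defined after "tasks".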
# TODO: uncomment when v18.2.3 is available
####### upgrade to v18.2.3
# - install.upgrade:
# mon.a:
# tag: v18.2.3
# mon.b:
# tag: v18.2.3
#- parallel:
# - workload_reef
# - upgrade-sequence_reef
#- print: "**** done parallel reef v18.2.3"
#### upgrade to latest reef
- install.upgrade:
mon.a:
mon.b:
- parallel:
- workload_reef
- upgrade-sequence_reef
- print: "**** done parallel reef branch"
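# No tag is given for this install.upgrade step, so mon.a's and mon.b's nodes
# are upgraded to the default build for the run, i.e. the latest reef branch,
# as the "upgrade to latest reef" heading above indicates.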
#######################
workload:
sequential:
- workunit:
clients:
client.0:
- suites/blogbench.sh
workload_reef:
full_sequential:
- workunit:
branch: reef
# tag: v18.2.2
clients:
client.1:
- rados/test.sh
- cls
env:
CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_snapshot'
- print: "**** done rados/test.sh & cls workload_reef"
- sequential:
- rgw: [client.0]
- print: "**** done rgw workload_reef"
- rbd_fsx:
clients: [client.0]
size: 134217728
- print: "**** done rbd_fsx workload_reef"
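# workload_reef runs its sub-tasks one after another (full_sequential) while
# upgrade-sequence_reef below restarts the daemons in parallel with it.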
upgrade-sequence_reef:
sequential:
- print: "**** done branch: reef install.upgrade"
- ceph.restart: [mds.a]
- sleep:
duration: 60
- ceph.restart: [osd.0]
- sleep:
duration: 30
- ceph.restart: [osd.1]
- sleep:
duration: 30
- ceph.restart: [osd.2]
- sleep:
duration: 30
- ceph.restart: [osd.3]
- sleep:
duration: 30
- ceph.restart: [osd.4]
- sleep:
duration: 30
- ceph.restart: [osd.5]
- sleep:
duration: 60
- ceph.restart: [mgr.x]
- sleep:
duration: 60
- ceph.restart: [mon.a]
- sleep:
duration: 60
- ceph.restart: [mon.b]
- sleep:
duration: 60
- ceph.restart: [mon.c]
- sleep:
duration: 60
- print: "**** done ceph.restart all reef branch mds/osd/mon"
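# Restart order during the upgrade: mds.a first, then each osd with a pause in
# between, then mgr.x, and finally the three mons, one daemon at a time.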