meta:
- desc: |
   Run ceph on two nodes, using one of them as a client,
   with a separate client-only node.
   Use xfs beneath the osds.
   Install ceph/pacific v16.2.5, then upgrade through the v16.2.x
   point releases, running the workload and the upgrade-sequence
   in parallel at each step (every point release should be tested).
   Finally, install the latest ceph/pacific build and run the
   workload and the upgrade-sequence in parallel once more.
   Overall upgrade path: pacific-latest.point-1 => pacific-latest.point => pacific-latest
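# A sketch of how a suite like this is typically scheduled with the
# teuthology-suite CLI; the branch, machine type, and email below are
# illustrative assumptions, not values taken from this file:
#   teuthology-suite --suite upgrade/pacific-p2p --ceph pacific \
#     --machine-type smithi --email you@example.com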
overrides:
  ceph:
    log-ignorelist:
    - reached quota
    - scrub
    - osd_map_max_advance
    - wrongly marked
    - FS_DEGRADED
    - POOL_APP_NOT_ENABLED
    - CACHE_POOL_NO_HIT_SET
    - POOL_FULL
    - SMALLER_PG
    - pool\(s\) full
    - OSD_DOWN
    - missing hit_sets
    - CACHE_POOL_NEAR_FULL
    - PG_AVAILABILITY
    - PG_DEGRADED
    - application not enabled
    - cache pools at or near target size
    - filesystem is degraded
    - OBJECT_MISPLACED
    ### ref: https://tracker.ceph.com/issues/40251
    # removed, see ref above: - failed to encode map
    fs: xfs
    conf:
      global:
        mon_warn_on_pool_no_app: false
        mon_mds_skip_sanity: true
      mon:
        mon_debug_unsafe_allow_tier_with_nonempty_snaps: true
      osd:
        osd_map_max_advance: 1000
        osd_class_default_list: "*"
        osd_class_load_list: "*"
      client:
        rgw_crypt_require_ssl: false
        rgw_crypt_s3_kms_backend: testing
        rgw_crypt_s3_kms_encryption_keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
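        # The "testing" KMS backend is radosgw's built-in dummy key server;
        # the testkey-* values are base64-encoded filler strings used only
        # by the encryption tests, not real secrets.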
roles:
- - mon.a
  - mds.a
  - osd.0
  - osd.1
  - osd.2
  - mgr.x
- - mon.b
  - mon.c
  - osd.3
  - osd.4
  - osd.5
  - client.0
- - client.1
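# Node layout per the description above: node 1 carries mon.a, mds.a,
# osd.0-2 and mgr.x; node 2 carries mon.b, mon.c, osd.3-5 and doubles as
# a client (client.0); node 3 is the client-only node (client.1).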
openstack:
- volumes: # attached to each instance
    count: 3
    size: 30 # GB
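# Three 30 GB volumes per instance: one backing device for each of the
# three OSDs hosted on each OSD node.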
tasks:
- print: "**** done pacific about to install v16.2.5"
- install:
    tag: v16.2.5
    # the commented line below is left over from the jewel test and can be removed
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
- print: "**** done v16.2.5 install"
- ceph:
    fs: xfs
    add_osds_to_crush: true
- print: "**** done ceph xfs"
- sequential:
  - workload
- print: "**** done workload v16.2.5"
####### upgrade to v16.2.7
- install.upgrade:
    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
    mon.a:
      tag: v16.2.7
    mon.b:
      tag: v16.2.7
- parallel:
  - workload_pacific
  - upgrade-sequence_pacific
- print: "**** done parallel pacific v16.2.7"
#### upgrade to latest pacific
- install.upgrade:
    mon.a:
    mon.b:
- parallel:
  - workload_pacific
  - upgrade-sequence_pacific
- print: "**** done parallel pacific branch"
#######################
workload:
  sequential:
  - workunit:
      clients:
        client.0:
        - suites/blogbench.sh
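# suites/blogbench.sh is a qa workunit wrapping the blogbench filesystem
# benchmark; run on client.0 it exercises the cephfs served by mds.a.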
workload_pacific:
  full_sequential:
  - workunit:
      branch: pacific
      #tag: v16.2.1
      clients:
        client.1:
        - rados/test.sh
        - cls
      env:
        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces'
  - print: "**** done rados/test.sh & cls workload_pacific"
  - sequential:
    - rgw: [client.0]
    - print: "**** done rgw workload_pacific"
    - s3tests:
        client.0:
          force-branch: ceph-pacific
          rgw_server: client.0
          scan_for_encryption_keys: false
    - print: "**** done s3tests workload_pacific"
    - rbd_fsx:
        clients: [client.0]
        size: 134217728
    - print: "**** done rbd_fsx workload_pacific"
upgrade-sequence_pacific:
  sequential:
  - print: "**** done branch: pacific install.upgrade"
  - ceph.restart: [mds.a]
  - sleep:
      duration: 60
  - ceph.restart: [osd.0]
  - sleep:
      duration: 30
  - ceph.restart: [osd.1]
  - sleep:
      duration: 30
  - ceph.restart: [osd.2]
  - sleep:
      duration: 30
  - ceph.restart: [osd.3]
  - sleep:
      duration: 30
  - ceph.restart: [osd.4]
  - sleep:
      duration: 30
  - ceph.restart: [osd.5]
  - sleep:
      duration: 60
  - ceph.restart: [mgr.x]
  - sleep:
      duration: 60
  - ceph.restart: [mon.a]
  - sleep:
      duration: 60
  - ceph.restart: [mon.b]
  - sleep:
      duration: 60
  - ceph.restart: [mon.c]
  - sleep:
      duration: 60
  - print: "**** done ceph.restart all pacific branch mds/osd/mon"