meta:
- desc: |
   install ceph/luminous latest
   run workload and upgrade-sequence in parallel
   upgrade the client node
tasks:
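# Install the luminous packages first. The excluded packages did not
# exist in the luminous release (librados3 and the split-out ceph-mgr-*
# plugin packages arrived in later releases), so listing them keeps the
# package install from failing; librados2, the luminous-era library, is
# pulled in explicitly instead.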
- install:
    branch: luminous
    exclude_packages:
    - librados3
    - ceph-mgr-dashboard
    - ceph-mgr-diskprediction-local
    - ceph-mgr-diskprediction-cloud
    - ceph-mgr-rook
    - ceph-mgr-ssh
    extra_packages: ['librados2']
- print: "**** done installing luminous"
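# Bootstrap the cluster. The log-whitelist regexes match health warnings
# that are expected while the upgrade is in flight (down mons/mgrs, PG
# churn, and so on), so hitting them does not fail the run. The conf
# overrides silence warnings that are normal for a cluster created on
# luminous: missing pool application tags, legacy bluestore statfs
# accounting, and a low PG-per-OSD count.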
- ceph:
    log-whitelist:
    - overall HEALTH_
    - \(FS_
    - \(MDS_
    - \(OSD_
    - \(MON_DOWN\)
    - \(CACHE_POOL_
    - \(POOL_
    - \(MGR_DOWN\)
    - \(PG_
    - \(SMALLER_PGP_NUM\)
    - Monitor daemon marked osd
    - Behind on trimming
    - Manager daemon
    conf:
      global:
        mon warn on pool no app: false
        bluestore_warn_on_legacy_statfs: false
        mon pg warn min per osd: 0
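# Pin the cluster to luminous semantics before the upgrade starts:
# require-osd-release refuses to boot OSDs older than luminous, and
# set-require-min-compat-client refuses connections from pre-luminous
# clients.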
- exec:
    osd.0:
    - ceph osd require-osd-release luminous
    - ceph osd set-require-min-compat-client luminous
- print: "**** done ceph"
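# Upgrade the packages on the hosts carrying mon.a, mon.b, and mon.c
# (all non-client nodes) to the branch under test; the daemons keep
# running the old binaries until the upgrade-sequence restarts them.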
- install.upgrade:
    mon.a:
    mon.b:
    mon.c:
- print: "**** done install.upgrade non-client hosts"
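# Run the client workload and the daemon-restart upgrade-sequence
# concurrently; both task lists are assumed to be defined by sibling
# fragments of this suite and are spliced in by teuthology.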
- parallel:
    - workload
    - upgrade-sequence
- print: "**** done parallel"
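# Finally upgrade the client node, which stayed on luminous while the
# workload ran against the upgraded cluster.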
- install.upgrade:
    client.0:
- print: "**** done install.upgrade on client.0"