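# single-host upgrade to quincy: restart the mons/mgrs first, then the OSDs, then the MDS daemons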
overrides:
  ceph:
    log-ignorelist:
    - scrub mismatch
    - ScrubResult
    - wrongly marked
    - \(POOL_APP_NOT_ENABLED\)
    - \(SLOW_OPS\)
    - overall HEALTH_
    - \(MON_MSGR2_NOT_ENABLED\)
    - slow request
    conf:
      global:
        bluestore warn on legacy statfs: false
        bluestore warn on no per pool omap: false
      mon:
        mon warn on osd down out interval zero: false
tasks:
- mds_pre_upgrade:
- print: "**** done mds pre-upgrade sequence"
- install.upgrade:
    # upgrade the single cluster node, which is running all the mon/mds/osd/mgr daemons
    mon.a:
      branch: quincy
- print: "**** done install.upgrade the host"
- ceph.restart:
    daemons: [mon.*, mgr.*]
    mon-health-to-clog: false
    wait-for-healthy: false
- ceph.healthy:
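# restart the OSDs next, waiting only for them to come back up rather than for full health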
- ceph.restart:
    daemons: [osd.*]
    wait-for-healthy: false
    wait-for-osds-up: true
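# stop all MDS daemons, then restart them on the new version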
- ceph.stop: [mds.*]
- ceph.restart:
    daemons: [mds.*]
    wait-for-healthy: false
    wait-for-osds-up: true
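# confirm the upgrade, require the quincy OSD release, and turn off pg autoscaling on every pool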
- exec:
    mon.a:
    - ceph osd dump -f json-pretty
    - ceph versions
    - ceph osd require-osd-release quincy
    - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done
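# wait for the cluster to report healthy again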
- ceph.healthy:
- print: "**** done ceph.restart"