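# Recovery preemption test: recovery is deliberately slowed, osd.0 is marked
# out and osd.1 is restarted behind a noup flag, and the OSD logs are then
# checked for at least one deferred (preempted) backfill or recovery.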
roles:
- - mon.a
  - mon.b
  - mon.c
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 20 # GB
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
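    # The OSD settings below throttle recovery (osd recovery sleep) and keep
    # the PG logs short; presumably this is so the out/restarted OSDs need
    # backfill/recovery that runs long enough to be deferred.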
    conf:
      osd:
        osd recovery sleep: .1
        osd min pg log entries: 10
        osd max pg log entries: 1000
        osd_target_pg_log_entries_per_osd: 0
        osd pg log trim min: 10
    log-ignorelist:
      - \(POOL_APP_NOT_ENABLED\)
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(OBJECT_
      - \(PG_
      - \(SLOW_OPS\)
      - overall HEALTH
      - slow request
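# Create a small test pool and wait for the cluster to go healthy.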
- exec:
    osd.0:
      - ceph osd pool create foo 128
      - ceph osd pool application enable foo foo
      - sleep 5
- ceph.healthy:
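# Write 4 KB objects for 30 seconds, mark osd.0 out, and set noup so the
# OSD restarted next stays down.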
- exec:
    osd.0:
      - rados -p foo bench 30 write -b 4096 --no-cleanup
      - ceph osd out 0
      - sleep 5
      - ceph osd set noup
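# Restart osd.1; with noup set it cannot be marked up, so it misses the
# writes issued below.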
- ceph.restart:
    daemons: [osd.1]
    wait-for-up: false
    wait-for-healthy: false
- exec:
    osd.0:
      - rados -p foo bench 3 write -b 4096 --no-cleanup
      - ceph osd unset noup
      - sleep 10
      - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
- ceph.healthy:
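# The run only passes if the OSD logs record at least one deferred
# backfill/recovery; egrep exits non-zero otherwise, failing the task.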
- exec:
    osd.0:
      - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log