roles:
- - mon.a
  - mon.b
  - mon.c
  - mgr.x
  - osd.0
  - osd.1
  - osd.2
  - osd.3
openstack:
  - volumes: # attached to each instance
      count: 3
      size: 20 # GB
tasks:
- install:
- ceph:
    pre-mgr-commands:
      - sudo ceph config set mgr mgr_pool false --force
    conf:
      osd:
        # throttle recovery and keep pg logs short, so the restarted osd
        # falls behind and its recovery/backfill can be preempted
        osd recovery sleep: .1
        osd min pg log entries: 10
        osd max pg log entries: 1000
        osd_target_pg_log_entries_per_osd: 0
        osd pg log trim min: 10
    log-ignorelist:
      - \(POOL_APP_NOT_ENABLED\)
      - \(OSDMAP_FLAGS\)
      - \(OSD_
      - \(OBJECT_
      - \(PG_
      - \(SLOW_OPS\)
      - overall HEALTH
      - slow request
- exec:
    osd.0:
      - ceph osd pool create foo 128
      - ceph osd pool application enable foo foo
      - sleep 5
- ceph.healthy:
- exec:
    osd.0:
      # seed the pool, take osd.0 out, and keep restarted osds from
      # rejoining while more writes land
      - rados -p foo bench 30 write -b 4096 --no-cleanup
      - ceph osd out 0
      - sleep 5
      - ceph osd set noup
- ceph.restart:
    daemons: [osd.1]
    wait-for-up: false
    wait-for-healthy: false
- exec:
    osd.0:
      # write past the down osd.1, let it rejoin so it must recover, then
      # lift the throttles so recovery can complete
      - rados -p foo bench 3 write -b 4096 --no-cleanup
      - ceph osd unset noup
      - sleep 10
      - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
- ceph.healthy:
- exec:
    osd.0:
      # the job passes only if at least one recovery or backfill op was
      # deferred (preempted) during the run
      - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
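# ---------------------------------------------------------------------------
# Usage sketch (assumptions: a configured teuthology environment with
# lockable test nodes; the filename, machine type, and archive path below
# are illustrative and not part of this fragment). A standalone job yaml
# like this one can be run directly with the teuthology runner:
#
#   teuthology --lock --machine-type smithi \
#       --archive ~/archive/recovery-preemption \
#       recovery-preemption.yaml
#
# Because exec steps fail on a nonzero exit status, the run succeeds only
# if the final egrep matches, i.e. the OSD logs contain "defer backfill" or
# "defer recovery" lines showing that recovery work was actually preempted.
# ---------------------------------------------------------------------------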