author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /qa/suites/rados
parent     Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/suites/rados')
l---------qa/suites/rados/.qa1
-rw-r--r--qa/suites/rados/basic/%0
l---------qa/suites/rados/basic/.qa1
-rw-r--r--qa/suites/rados/basic/ceph.yaml13
-rw-r--r--qa/suites/rados/basic/clusters/+0
l---------qa/suites/rados/basic/clusters/.qa1
l---------qa/suites/rados/basic/clusters/fixed-2.yaml1
-rw-r--r--qa/suites/rados/basic/clusters/openstack.yaml4
l---------qa/suites/rados/basic/msgr1
l---------qa/suites/rados/basic/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/basic/msgr-failures/few.yaml7
-rw-r--r--qa/suites/rados/basic/msgr-failures/many.yaml7
l---------qa/suites/rados/basic/objectstore1
l---------qa/suites/rados/basic/rados.yaml1
l---------qa/suites/rados/basic/supported-random-distro$1
l---------qa/suites/rados/basic/tasks/.qa1
-rw-r--r--qa/suites/rados/basic/tasks/rados_api_tests.yaml24
-rw-r--r--qa/suites/rados/basic/tasks/rados_cls_all.yaml13
-rw-r--r--qa/suites/rados/basic/tasks/rados_python.yaml15
-rw-r--r--qa/suites/rados/basic/tasks/rados_stress_watch.yaml11
-rw-r--r--qa/suites/rados/basic/tasks/rados_striper.yaml7
-rw-r--r--qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml11
-rw-r--r--qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml11
-rw-r--r--qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml11
-rw-r--r--qa/suites/rados/basic/tasks/readwrite.yaml17
-rw-r--r--qa/suites/rados/basic/tasks/repair_test.yaml31
-rw-r--r--qa/suites/rados/basic/tasks/rgw_snaps.yaml41
-rw-r--r--qa/suites/rados/basic/tasks/scrub_test.yaml30
-rw-r--r--qa/suites/rados/dashboard/%0
l---------qa/suites/rados/dashboard/.qa1
-rw-r--r--qa/suites/rados/dashboard/clusters/+0
l---------qa/suites/rados/dashboard/clusters/.qa1
l---------qa/suites/rados/dashboard/clusters/2-node-mgr.yaml1
l---------qa/suites/rados/dashboard/debug/.qa1
l---------qa/suites/rados/dashboard/debug/mgr.yaml1
l---------qa/suites/rados/dashboard/objectstore1
l---------qa/suites/rados/dashboard/supported-random-distro$1
l---------qa/suites/rados/dashboard/tasks/.qa1
-rw-r--r--qa/suites/rados/dashboard/tasks/dashboard.yaml51
-rw-r--r--qa/suites/rados/mgr/%0
l---------qa/suites/rados/mgr/.qa1
-rw-r--r--qa/suites/rados/mgr/clusters/+0
l---------qa/suites/rados/mgr/clusters/.qa1
l---------qa/suites/rados/mgr/clusters/2-node-mgr.yaml1
l---------qa/suites/rados/mgr/debug/.qa1
l---------qa/suites/rados/mgr/debug/mgr.yaml1
l---------qa/suites/rados/mgr/objectstore1
l---------qa/suites/rados/mgr/supported-random-distro$1
l---------qa/suites/rados/mgr/tasks/.qa1
-rw-r--r--qa/suites/rados/mgr/tasks/crash.yaml17
-rw-r--r--qa/suites/rados/mgr/tasks/failover.yaml16
-rw-r--r--qa/suites/rados/mgr/tasks/insights.yaml19
-rw-r--r--qa/suites/rados/mgr/tasks/module_selftest.yaml25
-rw-r--r--qa/suites/rados/mgr/tasks/orchestrator_cli.yaml18
-rw-r--r--qa/suites/rados/mgr/tasks/progress.yaml24
-rw-r--r--qa/suites/rados/mgr/tasks/prometheus.yaml16
-rw-r--r--qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml18
-rw-r--r--qa/suites/rados/mgr/tasks/workunits.yaml16
-rw-r--r--qa/suites/rados/monthrash/%0
l---------qa/suites/rados/monthrash/.qa1
-rw-r--r--qa/suites/rados/monthrash/ceph.yaml25
l---------qa/suites/rados/monthrash/clusters/.qa1
-rw-r--r--qa/suites/rados/monthrash/clusters/3-mons.yaml7
-rw-r--r--qa/suites/rados/monthrash/clusters/9-mons.yaml7
l---------qa/suites/rados/monthrash/msgr1
l---------qa/suites/rados/monthrash/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/monthrash/msgr-failures/few.yaml7
-rw-r--r--qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml13
l---------qa/suites/rados/monthrash/objectstore1
l---------qa/suites/rados/monthrash/rados.yaml1
l---------qa/suites/rados/monthrash/supported-random-distro$1
l---------qa/suites/rados/monthrash/thrashers/.qa1
-rw-r--r--qa/suites/rados/monthrash/thrashers/force-sync-many.yaml12
-rw-r--r--qa/suites/rados/monthrash/thrashers/many.yaml16
-rw-r--r--qa/suites/rados/monthrash/thrashers/one.yaml9
-rw-r--r--qa/suites/rados/monthrash/thrashers/sync-many.yaml14
-rw-r--r--qa/suites/rados/monthrash/thrashers/sync.yaml13
l---------qa/suites/rados/monthrash/workloads/.qa1
-rw-r--r--qa/suites/rados/monthrash/workloads/pool-create-delete.yaml58
-rw-r--r--qa/suites/rados/monthrash/workloads/rados_5925.yaml9
-rw-r--r--qa/suites/rados/monthrash/workloads/rados_api_tests.yaml26
-rw-r--r--qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml22
-rw-r--r--qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml17
-rw-r--r--qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml13
-rw-r--r--qa/suites/rados/multimon/%0
l---------qa/suites/rados/multimon/.qa1
l---------qa/suites/rados/multimon/clusters/.qa1
-rw-r--r--qa/suites/rados/multimon/clusters/21.yaml8
-rw-r--r--qa/suites/rados/multimon/clusters/3.yaml7
-rw-r--r--qa/suites/rados/multimon/clusters/6.yaml7
-rw-r--r--qa/suites/rados/multimon/clusters/9.yaml8
l---------qa/suites/rados/multimon/msgr1
l---------qa/suites/rados/multimon/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/multimon/msgr-failures/few.yaml7
-rw-r--r--qa/suites/rados/multimon/msgr-failures/many.yaml8
-rw-r--r--qa/suites/rados/multimon/no_pools.yaml3
l---------qa/suites/rados/multimon/objectstore1
l---------qa/suites/rados/multimon/rados.yaml1
l---------qa/suites/rados/multimon/supported-random-distro$1
l---------qa/suites/rados/multimon/tasks/.qa1
-rw-r--r--qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml11
-rw-r--r--qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml24
-rw-r--r--qa/suites/rados/multimon/tasks/mon_recovery.yaml10
-rw-r--r--qa/suites/rados/objectstore/%0
l---------qa/suites/rados/objectstore/.qa1
l---------qa/suites/rados/objectstore/backends/.qa1
-rw-r--r--qa/suites/rados/objectstore/backends/alloc-hint.yaml22
-rw-r--r--qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml25
-rw-r--r--qa/suites/rados/objectstore/backends/filejournal.yaml13
-rw-r--r--qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml14
-rw-r--r--qa/suites/rados/objectstore/backends/filestore-idempotent.yaml11
-rw-r--r--qa/suites/rados/objectstore/backends/fusestore.yaml9
-rw-r--r--qa/suites/rados/objectstore/backends/keyvaluedb.yaml8
-rw-r--r--qa/suites/rados/objectstore/backends/objectcacher-stress.yaml14
-rw-r--r--qa/suites/rados/objectstore/backends/objectstore.yaml12
l---------qa/suites/rados/objectstore/supported-random-distro$1
-rw-r--r--qa/suites/rados/perf/%0
l---------qa/suites/rados/perf/.qa1
-rw-r--r--qa/suites/rados/perf/ceph.yaml14
l---------qa/suites/rados/perf/distros/ubuntu_16.04.yaml1
l---------qa/suites/rados/perf/distros/ubuntu_latest.yaml1
l---------qa/suites/rados/perf/objectstore1
-rw-r--r--qa/suites/rados/perf/openstack.yaml4
l---------qa/suites/rados/perf/settings/.qa1
-rw-r--r--qa/suites/rados/perf/settings/optimized.yaml76
l---------qa/suites/rados/perf/workloads/.qa1
-rw-r--r--qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml26
-rw-r--r--qa/suites/rados/perf/workloads/cosbench_64K_write.yaml26
-rw-r--r--qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml24
-rw-r--r--qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml24
-rw-r--r--qa/suites/rados/perf/workloads/radosbench_4M_write.yaml24
-rw-r--r--qa/suites/rados/perf/workloads/sample_fio.yaml25
-rw-r--r--qa/suites/rados/perf/workloads/sample_radosbench.yaml24
-rw-r--r--qa/suites/rados/rest/%0
l---------qa/suites/rados/rest/.qa1
-rw-r--r--qa/suites/rados/rest/mgr-restful.yaml29
l---------qa/suites/rados/rest/supported-random-distro$1
-rw-r--r--qa/suites/rados/singleton-bluestore/%0
l---------qa/suites/rados/singleton-bluestore/.qa1
l---------qa/suites/rados/singleton-bluestore/all/.qa1
-rw-r--r--qa/suites/rados/singleton-bluestore/all/cephtool.yaml44
l---------qa/suites/rados/singleton-bluestore/msgr1
l---------qa/suites/rados/singleton-bluestore/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml7
-rw-r--r--qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml7
l---------qa/suites/rados/singleton-bluestore/objectstore/.qa1
l---------qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml1
l---------qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml1
l---------qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml1
l---------qa/suites/rados/singleton-bluestore/rados.yaml1
l---------qa/suites/rados/singleton-bluestore/supported-random-distro$1
l---------qa/suites/rados/singleton-flat/.qa1
-rw-r--r--qa/suites/rados/singleton-flat/valgrind-leaks.yaml36
-rw-r--r--qa/suites/rados/singleton-nomsgr/%0
l---------qa/suites/rados/singleton-nomsgr/.qa1
l---------qa/suites/rados/singleton-nomsgr/all/.qa1
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml24
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/balancer.yaml10
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml52
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml21
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml12
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml38
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml38
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml20
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml27
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml16
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml22
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/msgr.yaml21
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml48
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/pool-access.yaml13
-rw-r--r--qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml58
l---------qa/suites/rados/singleton-nomsgr/rados.yaml1
l---------qa/suites/rados/singleton-nomsgr/supported-random-distro$1
-rw-r--r--qa/suites/rados/singleton/%0
l---------qa/suites/rados/singleton/.qa1
l---------qa/suites/rados/singleton/all/.qa1
-rw-r--r--qa/suites/rados/singleton/all/admin-socket.yaml26
-rw-r--r--qa/suites/rados/singleton/all/deduptool.yaml26
-rw-r--r--qa/suites/rados/singleton/all/divergent_priors.yaml26
-rw-r--r--qa/suites/rados/singleton/all/divergent_priors2.yaml26
-rw-r--r--qa/suites/rados/singleton/all/dump-stuck.yaml19
-rw-r--r--qa/suites/rados/singleton/all/ec-lost-unfound.yaml26
-rw-r--r--qa/suites/rados/singleton/all/erasure-code-nonregression.yaml17
-rw-r--r--qa/suites/rados/singleton/all/lost-unfound-delete.yaml25
-rw-r--r--qa/suites/rados/singleton/all/lost-unfound.yaml25
-rw-r--r--qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml27
-rw-r--r--qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml32
-rw-r--r--qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml32
-rw-r--r--qa/suites/rados/singleton/all/mon-auth-caps.yaml17
-rw-r--r--qa/suites/rados/singleton/all/mon-config-key-caps.yaml17
-rw-r--r--qa/suites/rados/singleton/all/mon-config-keys.yaml20
-rw-r--r--qa/suites/rados/singleton/all/mon-config.yaml20
-rw-r--r--qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled152
-rw-r--r--qa/suites/rados/singleton/all/osd-backfill.yaml26
-rw-r--r--qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml28
-rw-r--r--qa/suites/rados/singleton/all/osd-recovery.yaml30
-rw-r--r--qa/suites/rados/singleton/all/peer.yaml25
-rw-r--r--qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml42
-rw-r--r--qa/suites/rados/singleton/all/pg-autoscaler.yaml38
-rw-r--r--qa/suites/rados/singleton/all/pg-removal-interruption.yaml34
-rw-r--r--qa/suites/rados/singleton/all/radostool.yaml26
-rw-r--r--qa/suites/rados/singleton/all/random-eio.yaml44
-rw-r--r--qa/suites/rados/singleton/all/rebuild-mondb.yaml32
-rw-r--r--qa/suites/rados/singleton/all/recovery-preemption.yaml57
-rw-r--r--qa/suites/rados/singleton/all/resolve_stuck_peering.yaml17
-rw-r--r--qa/suites/rados/singleton/all/test-crash.yaml15
-rw-r--r--qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml19
-rw-r--r--qa/suites/rados/singleton/all/thrash-backfill-full.yaml50
-rw-r--r--qa/suites/rados/singleton/all/thrash-eio.yaml47
-rw-r--r--qa/suites/rados/singleton/all/thrash-rados/+0
l---------qa/suites/rados/singleton/all/thrash-rados/.qa1
-rw-r--r--qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml27
l---------qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml1
-rw-r--r--qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml70
-rw-r--r--qa/suites/rados/singleton/all/watch-notify-same-primary.yaml32
l---------qa/suites/rados/singleton/msgr1
l---------qa/suites/rados/singleton/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/singleton/msgr-failures/few.yaml7
-rw-r--r--qa/suites/rados/singleton/msgr-failures/many.yaml11
l---------qa/suites/rados/singleton/objectstore1
l---------qa/suites/rados/singleton/rados.yaml1
l---------qa/suites/rados/singleton/supported-random-distro$1
-rw-r--r--qa/suites/rados/standalone/%0
l---------qa/suites/rados/standalone/.qa1
l---------qa/suites/rados/standalone/supported-random-distro$1
l---------qa/suites/rados/standalone/workloads/.qa1
-rw-r--r--qa/suites/rados/standalone/workloads/crush.yaml18
-rw-r--r--qa/suites/rados/standalone/workloads/erasure-code.yaml18
-rw-r--r--qa/suites/rados/standalone/workloads/mgr.yaml18
-rw-r--r--qa/suites/rados/standalone/workloads/misc.yaml18
-rw-r--r--qa/suites/rados/standalone/workloads/mon.yaml18
-rw-r--r--qa/suites/rados/standalone/workloads/osd.yaml18
-rw-r--r--qa/suites/rados/standalone/workloads/scrub.yaml18
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/%0
l---------qa/suites/rados/thrash-erasure-code-big/.qa1
l---------qa/suites/rados/thrash-erasure-code-big/ceph.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/cluster/+0
l---------qa/suites/rados/thrash-erasure-code-big/cluster/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml4
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml4
l---------qa/suites/rados/thrash-erasure-code-big/msgr-failures1
l---------qa/suites/rados/thrash-erasure-code-big/objectstore1
l---------qa/suites/rados/thrash-erasure-code-big/rados.yaml1
l---------qa/suites/rados/thrash-erasure-code-big/recovery-overrides1
l---------qa/suites/rados/thrash-erasure-code-big/supported-random-distro$1
l---------qa/suites/rados/thrash-erasure-code-big/thrashers/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml20
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml19
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml20
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml21
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml16
-rw-r--r--qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml15
l---------qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml1
l---------qa/suites/rados/thrash-erasure-code-big/workloads/.qa1
l---------qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml1
l---------qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code-isa/%0
l---------qa/suites/rados/thrash-erasure-code-isa/.qa1
l---------qa/suites/rados/thrash-erasure-code-isa/arch/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml1
l---------qa/suites/rados/thrash-erasure-code-isa/ceph.yaml1
l---------qa/suites/rados/thrash-erasure-code-isa/clusters1
l---------qa/suites/rados/thrash-erasure-code-isa/msgr-failures1
l---------qa/suites/rados/thrash-erasure-code-isa/objectstore1
l---------qa/suites/rados/thrash-erasure-code-isa/rados.yaml1
l---------qa/suites/rados/thrash-erasure-code-isa/recovery-overrides1
l---------qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$1
l---------qa/suites/rados/thrash-erasure-code-isa/thrashers1
l---------qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml1
l---------qa/suites/rados/thrash-erasure-code-isa/workloads/.qa1
l---------qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code-overwrites/%0
l---------qa/suites/rados/thrash-erasure-code-overwrites/.qa1
l---------qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml1
l---------qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml1
l---------qa/suites/rados/thrash-erasure-code-overwrites/clusters1
l---------qa/suites/rados/thrash-erasure-code-overwrites/fast1
l---------qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures1
l---------qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml1
l---------qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides1
l---------qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$1
l---------qa/suites/rados/thrash-erasure-code-overwrites/thrashers1
l---------qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml1
l---------qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml23
-rw-r--r--qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml29
-rw-r--r--qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml28
-rw-r--r--qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml22
-rw-r--r--qa/suites/rados/thrash-erasure-code-shec/%0
l---------qa/suites/rados/thrash-erasure-code-shec/.qa1
l---------qa/suites/rados/thrash-erasure-code-shec/ceph.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code-shec/clusters/+0
l---------qa/suites/rados/thrash-erasure-code-shec/clusters/.qa1
l---------qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml4
l---------qa/suites/rados/thrash-erasure-code-shec/msgr-failures1
l---------qa/suites/rados/thrash-erasure-code-shec/objectstore1
l---------qa/suites/rados/thrash-erasure-code-shec/rados.yaml1
l---------qa/suites/rados/thrash-erasure-code-shec/recovery-overrides1
l---------qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$1
l---------qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml20
-rw-r--r--qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml19
l---------qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml1
l---------qa/suites/rados/thrash-erasure-code-shec/workloads/.qa1
l---------qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code/%0
l---------qa/suites/rados/thrash-erasure-code/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code/ceph.yaml3
l---------qa/suites/rados/thrash-erasure-code/clusters1
l---------qa/suites/rados/thrash-erasure-code/fast/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code/fast/fast.yaml5
-rw-r--r--qa/suites/rados/thrash-erasure-code/fast/normal.yaml0
l---------qa/suites/rados/thrash-erasure-code/msgr-failures1
l---------qa/suites/rados/thrash-erasure-code/objectstore1
l---------qa/suites/rados/thrash-erasure-code/rados.yaml1
l---------qa/suites/rados/thrash-erasure-code/recovery-overrides1
l---------qa/suites/rados/thrash-erasure-code/supported-random-distro$1
l---------qa/suites/rados/thrash-erasure-code/thrashers/.qa1
-rw-r--r--qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml19
-rw-r--r--qa/suites/rados/thrash-erasure-code/thrashers/default.yaml18
-rw-r--r--qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml20
-rw-r--r--qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml16
-rw-r--r--qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml16
l---------qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml1
l---------qa/suites/rados/thrash-erasure-code/workloads/.qa1
l---------qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml1
l---------qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml1
l---------qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml1
-rw-r--r--qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml27
-rw-r--r--qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml21
-rw-r--r--qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml14
-rw-r--r--qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml20
-rw-r--r--qa/suites/rados/thrash-old-clients/%0
l---------qa/suites/rados/thrash-old-clients/.qa1
l---------qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa1
l---------qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml1
l---------qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml1
l---------qa/suites/rados/thrash-old-clients/1-install/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/1-install/hammer.yaml29
-rw-r--r--qa/suites/rados/thrash-old-clients/1-install/jewel.yaml19
-rw-r--r--qa/suites/rados/thrash-old-clients/1-install/luminous.yaml15
l---------qa/suites/rados/thrash-old-clients/backoff/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/backoff/normal.yaml0
-rw-r--r--qa/suites/rados/thrash-old-clients/backoff/peering.yaml5
-rw-r--r--qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml6
-rw-r--r--qa/suites/rados/thrash-old-clients/ceph.yaml7
-rw-r--r--qa/suites/rados/thrash-old-clients/clusters/+0
l---------qa/suites/rados/thrash-old-clients/clusters/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/clusters/openstack.yaml4
-rw-r--r--qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml14
l---------qa/suites/rados/thrash-old-clients/d-balancer/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml6
-rw-r--r--qa/suites/rados/thrash-old-clients/d-balancer/off.yaml0
l---------qa/suites/rados/thrash-old-clients/distro$/.qa1
l---------qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml1
l---------qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml1
l---------qa/suites/rados/thrash-old-clients/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml8
-rw-r--r--qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml9
-rw-r--r--qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml11
l---------qa/suites/rados/thrash-old-clients/msgr/.qa1
l---------qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml1
l---------qa/suites/rados/thrash-old-clients/msgr/async.yaml1
l---------qa/suites/rados/thrash-old-clients/msgr/random.yaml1
l---------qa/suites/rados/thrash-old-clients/msgr/simple.yaml1
l---------qa/suites/rados/thrash-old-clients/rados.yaml1
l---------qa/suites/rados/thrash-old-clients/thrashers/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/thrashers/careful.yaml25
-rw-r--r--qa/suites/rados/thrash-old-clients/thrashers/default.yaml24
-rw-r--r--qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml26
-rw-r--r--qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml22
-rw-r--r--qa/suites/rados/thrash-old-clients/thrashers/none.yaml0
-rw-r--r--qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml24
l---------qa/suites/rados/thrash-old-clients/thrashosds-health.yaml1
l---------qa/suites/rados/thrash-old-clients/workloads/.qa1
-rw-r--r--qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml34
-rw-r--r--qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml41
-rw-r--r--qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml7
-rw-r--r--qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml13
-rw-r--r--qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml8
-rw-r--r--qa/suites/rados/thrash/%0
l---------qa/suites/rados/thrash/.qa1
l---------qa/suites/rados/thrash/0-size-min-size-overrides/.qa1
l---------qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml1
l---------qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml1
l---------qa/suites/rados/thrash/1-pg-log-overrides/.qa1
-rw-r--r--qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml0
l---------qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml1
-rw-r--r--qa/suites/rados/thrash/2-recovery-overrides/$0
l---------qa/suites/rados/thrash/2-recovery-overrides/.qa1
-rw-r--r--qa/suites/rados/thrash/2-recovery-overrides/default.yaml0
l---------qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml1
l---------qa/suites/rados/thrash/backoff/.qa1
-rw-r--r--qa/suites/rados/thrash/backoff/normal.yaml0
-rw-r--r--qa/suites/rados/thrash/backoff/peering.yaml5
-rw-r--r--qa/suites/rados/thrash/backoff/peering_and_degraded.yaml6
-rw-r--r--qa/suites/rados/thrash/ceph.yaml3
-rw-r--r--qa/suites/rados/thrash/clusters/+0
l---------qa/suites/rados/thrash/clusters/.qa1
l---------qa/suites/rados/thrash/clusters/fixed-2.yaml1
-rw-r--r--qa/suites/rados/thrash/clusters/openstack.yaml4
-rw-r--r--qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml7
-rw-r--r--qa/suites/rados/thrash/crc-failures/default.yaml0
l---------qa/suites/rados/thrash/d-balancer/.qa1
-rw-r--r--qa/suites/rados/thrash/d-balancer/crush-compat.yaml6
-rw-r--r--qa/suites/rados/thrash/d-balancer/off.yaml0
-rw-r--r--qa/suites/rados/thrash/d-balancer/upmap.yaml7
l---------qa/suites/rados/thrash/msgr1
l---------qa/suites/rados/thrash/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/thrash/msgr-failures/fastclose.yaml8
-rw-r--r--qa/suites/rados/thrash/msgr-failures/few.yaml9
-rw-r--r--qa/suites/rados/thrash/msgr-failures/osd-delay.yaml11
l---------qa/suites/rados/thrash/objectstore1
l---------qa/suites/rados/thrash/rados.yaml1
l---------qa/suites/rados/thrash/supported-random-distro$1
l---------qa/suites/rados/thrash/thrashers/.qa1
-rw-r--r--qa/suites/rados/thrash/thrashers/careful.yaml26
-rw-r--r--qa/suites/rados/thrash/thrashers/default.yaml26
-rw-r--r--qa/suites/rados/thrash/thrashers/mapgap.yaml27
-rw-r--r--qa/suites/rados/thrash/thrashers/morepggrow.yaml22
-rw-r--r--qa/suites/rados/thrash/thrashers/none.yaml0
-rw-r--r--qa/suites/rados/thrash/thrashers/pggrow.yaml24
l---------qa/suites/rados/thrash/thrashosds-health.yaml1
l---------qa/suites/rados/thrash/workloads/.qa1
-rw-r--r--qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml13
-rw-r--r--qa/suites/rados/thrash/workloads/cache-agent-big.yaml36
-rw-r--r--qa/suites/rados/thrash/workloads/cache-agent-small.yaml34
-rw-r--r--qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml39
-rw-r--r--qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml44
-rw-r--r--qa/suites/rados/thrash/workloads/cache-snaps.yaml39
-rw-r--r--qa/suites/rados/thrash/workloads/cache.yaml36
-rw-r--r--qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml18
-rw-r--r--qa/suites/rados/thrash/workloads/rados_api_tests.yaml20
-rw-r--r--qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml49
-rw-r--r--qa/suites/rados/thrash/workloads/radosbench.yaml33
-rw-r--r--qa/suites/rados/thrash/workloads/redirect.yaml15
-rw-r--r--qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml14
-rw-r--r--qa/suites/rados/thrash/workloads/redirect_set_object.yaml13
-rw-r--r--qa/suites/rados/thrash/workloads/set-chunks-read.yaml13
-rw-r--r--qa/suites/rados/thrash/workloads/small-objects.yaml24
-rw-r--r--qa/suites/rados/thrash/workloads/snaps-few-objects.yaml13
-rw-r--r--qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml8
l---------qa/suites/rados/upgrade/.qa1
l---------qa/suites/rados/upgrade/mimic-x-singleton1
-rw-r--r--qa/suites/rados/verify/%0
l---------qa/suites/rados/verify/.qa1
-rw-r--r--qa/suites/rados/verify/ceph.yaml13
-rw-r--r--qa/suites/rados/verify/clusters/+0
l---------qa/suites/rados/verify/clusters/.qa1
l---------qa/suites/rados/verify/clusters/fixed-2.yaml1
-rw-r--r--qa/suites/rados/verify/clusters/openstack.yaml4
l---------qa/suites/rados/verify/d-thrash/.qa1
-rw-r--r--qa/suites/rados/verify/d-thrash/default/+0
l---------qa/suites/rados/verify/d-thrash/default/.qa1
-rw-r--r--qa/suites/rados/verify/d-thrash/default/default.yaml11
l---------qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml1
-rw-r--r--qa/suites/rados/verify/d-thrash/none.yaml0
l---------qa/suites/rados/verify/msgr1
l---------qa/suites/rados/verify/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/verify/msgr-failures/few.yaml7
l---------qa/suites/rados/verify/objectstore1
l---------qa/suites/rados/verify/rados.yaml1
l---------qa/suites/rados/verify/tasks/.qa1
-rw-r--r--qa/suites/rados/verify/tasks/mon_recovery.yaml10
-rw-r--r--qa/suites/rados/verify/tasks/rados_api_tests.yaml30
-rw-r--r--qa/suites/rados/verify/tasks/rados_cls_all.yaml13
l---------qa/suites/rados/verify/validater/.qa1
-rw-r--r--qa/suites/rados/verify/validater/lockdep.yaml5
-rw-r--r--qa/suites/rados/verify/validater/valgrind.yaml32
477 files changed, 5062 insertions, 0 deletions
diff --git a/qa/suites/rados/.qa b/qa/suites/rados/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/basic/% b/qa/suites/rados/basic/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/basic/%
diff --git a/qa/suites/rados/basic/.qa b/qa/suites/rados/basic/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/basic/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/basic/ceph.yaml b/qa/suites/rados/basic/ceph.yaml
new file mode 100644
index 00000000..c0857e14
--- /dev/null
+++ b/qa/suites/rados/basic/ceph.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/basic/clusters/+ b/qa/suites/rados/basic/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/basic/clusters/+
diff --git a/qa/suites/rados/basic/clusters/.qa b/qa/suites/rados/basic/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/basic/clusters/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/basic/clusters/fixed-2.yaml b/qa/suites/rados/basic/clusters/fixed-2.yaml
new file mode 120000
index 00000000..230ff0fd
--- /dev/null
+++ b/qa/suites/rados/basic/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+.qa/clusters/fixed-2.yaml \ No newline at end of file
diff --git a/qa/suites/rados/basic/clusters/openstack.yaml b/qa/suites/rados/basic/clusters/openstack.yaml
new file mode 100644
index 00000000..e559d912
--- /dev/null
+++ b/qa/suites/rados/basic/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/qa/suites/rados/basic/msgr b/qa/suites/rados/basic/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/basic/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/basic/msgr-failures/.qa b/qa/suites/rados/basic/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/basic/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/basic/msgr-failures/few.yaml b/qa/suites/rados/basic/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/basic/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/basic/msgr-failures/many.yaml b/qa/suites/rados/basic/msgr-failures/many.yaml
new file mode 100644
index 00000000..f4bb065b
--- /dev/null
+++ b/qa/suites/rados/basic/msgr-failures/many.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1500
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/basic/objectstore b/qa/suites/rados/basic/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/basic/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/basic/rados.yaml b/qa/suites/rados/basic/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/basic/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/basic/supported-random-distro$ b/qa/suites/rados/basic/supported-random-distro$
new file mode 120000
index 00000000..0862b445
--- /dev/null
+++ b/qa/suites/rados/basic/supported-random-distro$
@@ -0,0 +1 @@
+.qa/distros/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/basic/tasks/.qa b/qa/suites/rados/basic/tasks/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/qa/suites/rados/basic/tasks/rados_api_tests.yaml
new file mode 100644
index 00000000..b90d8089
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_api_tests.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_AVAILABILITY\)
+ conf:
+ client:
+ debug ms: 1
+ mon:
+ mon warn on pool no app: false
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
+ - rados/test_pool_quota.sh
+
diff --git a/qa/suites/rados/basic/tasks/rados_cls_all.yaml b/qa/suites/rados/basic/tasks/rados_cls_all.yaml
new file mode 100644
index 00000000..bcc58e19
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_cls_all.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ rgw sdk timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ rgw sdk timeindex user version"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
diff --git a/qa/suites/rados/basic/tasks/rados_python.yaml b/qa/suites/rados/basic/tasks/rados_python.yaml
new file mode 100644
index 00000000..8c70304d
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_python.yaml
@@ -0,0 +1,15 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test_python.sh
diff --git a/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
new file mode 100644
index 00000000..bee513eb
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(TOO_FEW_PGS\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/stress_watch.sh
diff --git a/qa/suites/rados/basic/tasks/rados_striper.yaml b/qa/suites/rados/basic/tasks/rados_striper.yaml
new file mode 100644
index 00000000..c19cc83a
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_striper.yaml
@@ -0,0 +1,7 @@
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_striper_api_io
+ - ceph_test_rados_striper_api_aio
+ - ceph_test_rados_striper_api_striping
+
diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
new file mode 100644
index 00000000..2dade6de
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-big.sh
diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
new file mode 100644
index 00000000..6b764a87
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix.sh
diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
new file mode 100644
index 00000000..c82023c2
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mostlyread.sh
diff --git a/qa/suites/rados/basic/tasks/readwrite.yaml b/qa/suites/rados/basic/tasks/readwrite.yaml
new file mode 100644
index 00000000..f135107c
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/readwrite.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ crush_tunables: optimal
+ conf:
+ mon:
+ mon osd initial require min compat client: luminous
+ osd:
+ osd_discard_disconnected_ops: false
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
diff --git a/qa/suites/rados/basic/tasks/repair_test.yaml b/qa/suites/rados/basic/tasks/repair_test.yaml
new file mode 100644
index 00000000..f3a7868d
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/repair_test.yaml
@@ -0,0 +1,31 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ log-whitelist:
+ - candidate had a stat error
+ - candidate had a read error
+ - deep-scrub 0 missing, 1 inconsistent objects
+ - deep-scrub 0 missing, 4 inconsistent objects
+ - deep-scrub [0-9]+ errors
+ - '!= omap_digest'
+ - '!= data_digest'
+ - repair 0 missing, 1 inconsistent objects
+ - repair 0 missing, 4 inconsistent objects
+ - repair [0-9]+ errors, [0-9]+ fixed
+ - scrub 0 missing, 1 inconsistent objects
+ - scrub [0-9]+ errors
+ - 'size 1 != size'
+ - attr name mismatch
+ - Regular scrub request, deep-scrub details will be lost
+ - candidate size [0-9]+ info size [0-9]+ mismatch
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ conf:
+ osd:
+ filestore debug inject read err: true
+ bluestore debug inject read err: true
+tasks:
+- repair_test:
+
diff --git a/qa/suites/rados/basic/tasks/rgw_snaps.yaml b/qa/suites/rados/basic/tasks/rgw_snaps.yaml
new file mode 100644
index 00000000..c94b7b2b
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/rgw_snaps.yaml
@@ -0,0 +1,41 @@
+overrides:
+ ceph:
+ conf:
+ client:
+ debug rgw: 20
+ debug ms: 1
+ osd:
+ osd_max_omap_entries_per_request: 10
+tasks:
+- rgw:
+ client.0:
+- ceph_manager.wait_for_pools:
+ kwargs:
+ pools:
+ - default.rgw.buckets.data
+ - default.rgw.buckets.index
+ - .rgw.root
+ - default.rgw.control
+ - default.rgw.meta
+ - default.rgw.log
+- thrash_pool_snaps:
+ pools:
+ - default.rgw.buckets.data
+ - default.rgw.buckets.index
+ - .rgw.root
+ - default.rgw.control
+ - default.rgw.meta
+ - default.rgw.log
+- s3readwrite:
+ client.0:
+ force-branch: ceph-nautilus
+ rgw_server: client.0
+ readwrite:
+ bucket: rwtest
+ readers: 10
+ writers: 3
+ duration: 300
+ files:
+ num: 10
+ size: 2000
+ stddev: 500
diff --git a/qa/suites/rados/basic/tasks/scrub_test.yaml b/qa/suites/rados/basic/tasks/scrub_test.yaml
new file mode 100644
index 00000000..00e85f9e
--- /dev/null
+++ b/qa/suites/rados/basic/tasks/scrub_test.yaml
@@ -0,0 +1,30 @@
+overrides:
+ ceph:
+ wait-for-scrub: false
+ log-whitelist:
+ - '!= data_digest'
+ - '!= omap_digest'
+ - '!= size'
+ - 'deep-scrub 0 missing, 1 inconsistent objects'
+ - 'deep-scrub [0-9]+ errors'
+ - 'repair 0 missing, 1 inconsistent objects'
+ - 'repair [0-9]+ errors, [0-9]+ fixed'
+ - 'shard [0-9]+ .* : missing'
+ - 'deep-scrub 1 missing, 1 inconsistent objects'
+ - 'does not match object info size'
+ - 'attr name mistmatch'
+ - 'deep-scrub 1 missing, 0 inconsistent objects'
+ - 'failed to pick suitable auth object'
+ - 'candidate size [0-9]+ info size [0-9]+ mismatch'
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OSD_SCRUB_ERRORS\)
+ - \(TOO_FEW_PGS\)
+ conf:
+ osd:
+ osd deep scrub update digest min age: 0
+ osd skip data digest: false
+tasks:
+- scrub_test:
diff --git a/qa/suites/rados/dashboard/% b/qa/suites/rados/dashboard/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/dashboard/%
diff --git a/qa/suites/rados/dashboard/.qa b/qa/suites/rados/dashboard/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/dashboard/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/clusters/+ b/qa/suites/rados/dashboard/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/dashboard/clusters/+
diff --git a/qa/suites/rados/dashboard/clusters/.qa b/qa/suites/rados/dashboard/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/dashboard/clusters/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/clusters/2-node-mgr.yaml b/qa/suites/rados/dashboard/clusters/2-node-mgr.yaml
new file mode 120000
index 00000000..8a0b9123
--- /dev/null
+++ b/qa/suites/rados/dashboard/clusters/2-node-mgr.yaml
@@ -0,0 +1 @@
+.qa/clusters/2-node-mgr.yaml \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/debug/.qa b/qa/suites/rados/dashboard/debug/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/dashboard/debug/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/debug/mgr.yaml b/qa/suites/rados/dashboard/debug/mgr.yaml
new file mode 120000
index 00000000..651e5f8a
--- /dev/null
+++ b/qa/suites/rados/dashboard/debug/mgr.yaml
@@ -0,0 +1 @@
+.qa/debug/mgr.yaml \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/objectstore b/qa/suites/rados/dashboard/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/dashboard/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/supported-random-distro$ b/qa/suites/rados/dashboard/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/dashboard/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/tasks/.qa b/qa/suites/rados/dashboard/tasks/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/dashboard/tasks/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/dashboard/tasks/dashboard.yaml b/qa/suites/rados/dashboard/tasks/dashboard.yaml
new file mode 100644
index 00000000..ad6adc7c
--- /dev/null
+++ b/qa/suites/rados/dashboard/tasks/dashboard.yaml
@@ -0,0 +1,51 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - \(FS_DEGRADED\)
+ - \(MDS_FAILED\)
+ - \(MDS_DEGRADED\)
+ - \(FS_WITH_FAILED_MDS\)
+ - \(MDS_DAMAGE\)
+ - \(MDS_ALL_DOWN\)
+ - \(MDS_UP_LESS_THAN_MAX\)
+ - \(OSD_DOWN\)
+ - \(OSD_HOST_DOWN\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - pauserd,pausewr flag\(s\) set
+ - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running
+ - evicting unresponsive client .+
+ - rgw: [client.0]
+ - cephfs_test_runner:
+ fail_on_skip: false
+ modules:
+ - tasks.mgr.test_dashboard
+ - tasks.mgr.dashboard.test_auth
+ - tasks.mgr.dashboard.test_cephfs
+ - tasks.mgr.dashboard.test_cluster_configuration
+ - tasks.mgr.dashboard.test_health
+ - tasks.mgr.dashboard.test_host
+ - tasks.mgr.dashboard.test_logs
+ - tasks.mgr.dashboard.test_monitor
+ - tasks.mgr.dashboard.test_osd
+ - tasks.mgr.dashboard.test_perf_counters
+ - tasks.mgr.dashboard.test_summary
+ - tasks.mgr.dashboard.test_rgw
+ - tasks.mgr.dashboard.test_rbd
+ - tasks.mgr.dashboard.test_pool
+ - tasks.mgr.dashboard.test_requests
+ - tasks.mgr.dashboard.test_role
+ - tasks.mgr.dashboard.test_settings
+ - tasks.mgr.dashboard.test_user
+ - tasks.mgr.dashboard.test_erasure_code_profile
+ - tasks.mgr.dashboard.test_mgr_module
+ - tasks.mgr.dashboard.test_ganesha
diff --git a/qa/suites/rados/mgr/% b/qa/suites/rados/mgr/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/mgr/%
diff --git a/qa/suites/rados/mgr/.qa b/qa/suites/rados/mgr/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/mgr/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/mgr/clusters/+ b/qa/suites/rados/mgr/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/mgr/clusters/+
diff --git a/qa/suites/rados/mgr/clusters/.qa b/qa/suites/rados/mgr/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/mgr/clusters/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/mgr/clusters/2-node-mgr.yaml b/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
new file mode 120000
index 00000000..8a0b9123
--- /dev/null
+++ b/qa/suites/rados/mgr/clusters/2-node-mgr.yaml
@@ -0,0 +1 @@
+.qa/clusters/2-node-mgr.yaml \ No newline at end of file
diff --git a/qa/suites/rados/mgr/debug/.qa b/qa/suites/rados/mgr/debug/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/mgr/debug/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/mgr/debug/mgr.yaml b/qa/suites/rados/mgr/debug/mgr.yaml
new file mode 120000
index 00000000..651e5f8a
--- /dev/null
+++ b/qa/suites/rados/mgr/debug/mgr.yaml
@@ -0,0 +1 @@
+.qa/debug/mgr.yaml \ No newline at end of file
diff --git a/qa/suites/rados/mgr/objectstore b/qa/suites/rados/mgr/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/mgr/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/mgr/supported-random-distro$ b/qa/suites/rados/mgr/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/mgr/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/mgr/tasks/.qa b/qa/suites/rados/mgr/tasks/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/mgr/tasks/crash.yaml b/qa/suites/rados/mgr/tasks/crash.yaml
new file mode 100644
index 00000000..7b4c4446
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/crash.yaml
@@ -0,0 +1,17 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(RECENT_CRASH\)
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_crash
diff --git a/qa/suites/rados/mgr/tasks/failover.yaml b/qa/suites/rados/mgr/tasks/failover.yaml
new file mode 100644
index 00000000..34be4715
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/failover.yaml
@@ -0,0 +1,16 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_failover
diff --git a/qa/suites/rados/mgr/tasks/insights.yaml b/qa/suites/rados/mgr/tasks/insights.yaml
new file mode 100644
index 00000000..52160665
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/insights.yaml
@@ -0,0 +1,19 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(MGR_INSIGHTS_WARNING\)
+ - \(insights_health_check
+ - \(PG_
+ - \(RECENT_CRASH\)
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_insights
diff --git a/qa/suites/rados/mgr/tasks/module_selftest.yaml b/qa/suites/rados/mgr/tasks/module_selftest.yaml
new file mode 100644
index 00000000..11053d6a
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/module_selftest.yaml
@@ -0,0 +1,25 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - Reduced data availability
+ - Degraded data redundancy
+ - objects misplaced
+ - Synthetic exception in serve
+ - influxdb python module not found
+ - \(MGR_ZABBIX_
+ - foo bar
+ - Failed to open Telegraf
+ - evicting unresponsive client
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_module_selftest
diff --git a/qa/suites/rados/mgr/tasks/orchestrator_cli.yaml b/qa/suites/rados/mgr/tasks/orchestrator_cli.yaml
new file mode 100644
index 00000000..65b1d78b
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/orchestrator_cli.yaml
@@ -0,0 +1,18 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(MGR_INSIGHTS_WARNING\)
+ - \(insights_health_check
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_orchestrator_cli \ No newline at end of file
diff --git a/qa/suites/rados/mgr/tasks/progress.yaml b/qa/suites/rados/mgr/tasks/progress.yaml
new file mode 100644
index 00000000..78462575
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/progress.yaml
@@ -0,0 +1,24 @@
+
+tasks:
+ - install:
+ - ceph:
+ config:
+ global:
+ osd pool default size : 3
+ osd pool default min size : 2
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(MDS_ALL_DOWN\)
+ - \(MDS_UP_LESS_THAN_MAX\)
+ - \(FS_WITH_FAILED_MDS\)
+ - \(FS_DEGRADED\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_progress
diff --git a/qa/suites/rados/mgr/tasks/prometheus.yaml b/qa/suites/rados/mgr/tasks/prometheus.yaml
new file mode 100644
index 00000000..1a777681
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/prometheus.yaml
@@ -0,0 +1,16 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_prometheus
diff --git a/qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml b/qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml
new file mode 100644
index 00000000..cd606f76
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml
@@ -0,0 +1,18 @@
+
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(MGR_INSIGHTS_WARNING\)
+ - \(insights_health_check
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - cephfs_test_runner:
+ modules:
+ - tasks.mgr.test_ssh_orchestrator
diff --git a/qa/suites/rados/mgr/tasks/workunits.yaml b/qa/suites/rados/mgr/tasks/workunits.yaml
new file mode 100644
index 00000000..d7261f44
--- /dev/null
+++ b/qa/suites/rados/mgr/tasks/workunits.yaml
@@ -0,0 +1,16 @@
+tasks:
+ - install:
+ - ceph:
+ # tests may leave mgrs broken, so don't try and call into them
+ # to invoke e.g. pg dump during teardown.
+ wait-for-scrub: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - replacing it with standby
+ - No standby daemons available
+ - workunit:
+ clients:
+ client.0:
+ - mgr \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/% b/qa/suites/rados/monthrash/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/monthrash/%
diff --git a/qa/suites/rados/monthrash/.qa b/qa/suites/rados/monthrash/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/monthrash/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/ceph.yaml b/qa/suites/rados/monthrash/ceph.yaml
new file mode 100644
index 00000000..6c53b315
--- /dev/null
+++ b/qa/suites/rados/monthrash/ceph.yaml
@@ -0,0 +1,25 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon min osdmap epochs: 25
+ paxos service trim min: 5
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+ mon scrub inject crc mismatch: 0.01
+ mon scrub inject missing keys: 0.05
+# thrashing monitors may make mgr have trouble w/ its keepalive
+ log-whitelist:
+ - ScrubResult
+ - scrub mismatch
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+# slow mons -> slow peering -> PG_AVAILABILITY
+ - \(PG_AVAILABILITY\)
+ - \(SLOW_OPS\)
+ - slow request
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/monthrash/clusters/.qa b/qa/suites/rados/monthrash/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/monthrash/clusters/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/clusters/3-mons.yaml b/qa/suites/rados/monthrash/clusters/3-mons.yaml
new file mode 100644
index 00000000..4b721ef8
--- /dev/null
+++ b/qa/suites/rados/monthrash/clusters/3-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, osd.0, osd.1, osd.2]
+- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/qa/suites/rados/monthrash/clusters/9-mons.yaml b/qa/suites/rados/monthrash/clusters/9-mons.yaml
new file mode 100644
index 00000000..a2874c1d
--- /dev/null
+++ b/qa/suites/rados/monthrash/clusters/9-mons.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2]
+- [mon.f, mon.g, mon.h, mon.i, mgr.x, osd.3, osd.4, osd.5, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
diff --git a/qa/suites/rados/monthrash/msgr b/qa/suites/rados/monthrash/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/msgr-failures/.qa b/qa/suites/rados/monthrash/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/msgr-failures/few.yaml b/qa/suites/rados/monthrash/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
new file mode 100644
index 00000000..fcd8ca7c
--- /dev/null
+++ b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: mon
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
+ mgr:
+ debug monc: 10
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/monthrash/objectstore b/qa/suites/rados/monthrash/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/monthrash/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/rados.yaml b/qa/suites/rados/monthrash/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/monthrash/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/supported-random-distro$ b/qa/suites/rados/monthrash/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/monthrash/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/thrashers/.qa b/qa/suites/rados/monthrash/thrashers/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
new file mode 100644
index 00000000..2d1ba882
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml
@@ -0,0 +1,12 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(TOO_FEW_PGS\)
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_store: true
+ thrash_many: true
diff --git a/qa/suites/rados/monthrash/thrashers/many.yaml b/qa/suites/rados/monthrash/thrashers/many.yaml
new file mode 100644
index 00000000..fa829b34
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/many.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ conf:
+ osd:
+ mon client ping interval: 4
+ mon client ping timeout: 12
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
+ thrash_many: true
+ freeze_mon_duration: 20
+ freeze_mon_probability: 10
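
Note: the mon_thrash task repeatedly takes monitors down and brings them back while the workload runs. The field meanings below restate the teuthology task options as understood from the suite files; treat the comments as assumptions rather than authoritative documentation:

    - mon_thrash:
        revive_delay: 20            # seconds a downed monitor stays down before revival
        thrash_delay: 1             # seconds to wait between thrashing rounds
        thrash_many: true           # allow more than one monitor to be down at a time
        freeze_mon_duration: 20     # when pausing instead of killing, how long the mon stays frozen
        freeze_mon_probability: 10  # relative likelihood of freezing rather than killing (scale per the task)
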
diff --git a/qa/suites/rados/monthrash/thrashers/one.yaml b/qa/suites/rados/monthrash/thrashers/one.yaml
new file mode 100644
index 00000000..041cee0b
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/one.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+tasks:
+- mon_thrash:
+ revive_delay: 20
+ thrash_delay: 1
diff --git a/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/qa/suites/rados/monthrash/thrashers/sync-many.yaml
new file mode 100644
index 00000000..14f41f7f
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/sync-many.yaml
@@ -0,0 +1,14 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ conf:
+ mon:
+ paxos min: 10
+ paxos trim min: 10
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
+ thrash_many: true
diff --git a/qa/suites/rados/monthrash/thrashers/sync.yaml b/qa/suites/rados/monthrash/thrashers/sync.yaml
new file mode 100644
index 00000000..08b1522c
--- /dev/null
+++ b/qa/suites/rados/monthrash/thrashers/sync.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ conf:
+ mon:
+ paxos min: 10
+ paxos trim min: 10
+tasks:
+- mon_thrash:
+ revive_delay: 90
+ thrash_delay: 1
diff --git a/qa/suites/rados/monthrash/workloads/.qa b/qa/suites/rados/monthrash/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
new file mode 100644
index 00000000..c6b00b48
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml
@@ -0,0 +1,58 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - slow request
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
+ - ceph_test_rados_delete_pools_parallel
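
Note: the exec task runs the listed commands on client.0 one after another, so repeating ceph_test_rados_delete_pools_parallel fifty times simply gives fifty back-to-back passes of the parallel pool create/delete race while the monitors are being thrashed. Functionally the same workload could be sketched with a shell loop (untested and illustrative only; the suite deliberately spells each invocation out):

    tasks:
    - exec:
        client.0:
          - for i in $(seq 1 50); do ceph_test_rados_delete_pools_parallel; done
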
diff --git a/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/qa/suites/rados/monthrash/workloads/rados_5925.yaml
new file mode 100644
index 00000000..940d3a8e
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_5925.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- exec:
+ client.0:
+ - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20
diff --git a/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
new file mode 100644
index 00000000..f0bd5685
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,26 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_FULL\)
+ - \(SLOW_OPS\)
+ - \(MON_DOWN\)
+ - \(PG_
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(SMALLER_PGP_NUM\)
+ - slow request
+ conf:
+ global:
+ debug objecter: 20
+ debug rados: 20
+ debug ms: 1
+ mon:
+ mon warn on pool no app: false
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml b/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml
new file mode 100644
index 00000000..cca902af
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon debug extra checks: true
+ mon min osdmap epochs: 100
+ mon osdmap full prune enabled: true
+ mon osdmap full prune min: 200
+ mon osdmap full prune interval: 10
+ mon osdmap full prune txsize: 100
+ osd:
+ osd beacon report interval: 10
+ log-whitelist:
+ # setting/unsetting noup will trigger health warns,
+ # causing tests to fail due to health warns, even if
+ # the tests themselves are successful.
+ - \(OSDMAP_FLAGS\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - mon/test_mon_osdmap_prune.sh
diff --git a/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
new file mode 100644
index 00000000..63b88c0d
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(PG_
+ - \(MON_DOWN\)
+ - \(AUTH_BAD_CAPS\)
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - mon/pool_ops.sh
+ - mon/crush_ops.sh
+ - mon/osd.sh
+ - mon/caps.sh
+
diff --git a/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 00000000..aa82d973
--- /dev/null
+++ b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
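
Note: the rados task drives ceph_test_rados: ops is the total number of operations to issue, objects bounds the working set, and op_weights are relative weights for choosing the next operation, so reads and writes here are twice as likely as any single snapshot operation. A hedged variation is sketched below; the extra append weight is an assumption about supported op names drawn from other rados workloads, not from this file:

    - rados:
        clients: [client.0]
        ops: 4000
        objects: 50
        op_weights:
          read: 100
          write: 50
          append: 50      # assumed op name; takes half of the original write weight
          delete: 50
          snap_create: 50
          snap_remove: 50
          rollback: 50
          copy_from: 50
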
diff --git a/qa/suites/rados/multimon/% b/qa/suites/rados/multimon/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/multimon/%
diff --git a/qa/suites/rados/multimon/.qa b/qa/suites/rados/multimon/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/multimon/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/multimon/clusters/.qa b/qa/suites/rados/multimon/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/multimon/clusters/21.yaml b/qa/suites/rados/multimon/clusters/21.yaml
new file mode 100644
index 00000000..aae96866
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/21.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s]
+- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mgr.x]
+- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/qa/suites/rados/multimon/clusters/3.yaml b/qa/suites/rados/multimon/clusters/3.yaml
new file mode 100644
index 00000000..11adef16
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/3.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c]
+- [mon.b, mgr.x]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
diff --git a/qa/suites/rados/multimon/clusters/6.yaml b/qa/suites/rados/multimon/clusters/6.yaml
new file mode 100644
index 00000000..29c74dc7
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/6.yaml
@@ -0,0 +1,7 @@
+roles:
+- [mon.a, mon.c, mon.e, mgr.x]
+- [mon.b, mon.d, mon.f, mgr.y]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/qa/suites/rados/multimon/clusters/9.yaml b/qa/suites/rados/multimon/clusters/9.yaml
new file mode 100644
index 00000000..d5116855
--- /dev/null
+++ b/qa/suites/rados/multimon/clusters/9.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mon.d, mon.g]
+- [mon.b, mon.e, mon.h, mgr.x]
+- [mon.c, mon.f, mon.i]
+openstack:
+- volumes: # attached to each instance
+ count: 1
+ size: 10 # GB
diff --git a/qa/suites/rados/multimon/msgr b/qa/suites/rados/multimon/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/multimon/msgr-failures/.qa b/qa/suites/rados/multimon/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/multimon/msgr-failures/few.yaml b/qa/suites/rados/multimon/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/multimon/msgr-failures/many.yaml b/qa/suites/rados/multimon/msgr-failures/many.yaml
new file mode 100644
index 00000000..ffeb5f68
--- /dev/null
+++ b/qa/suites/rados/multimon/msgr-failures/many.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1000
+ mon mgr beacon grace: 90
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/multimon/no_pools.yaml b/qa/suites/rados/multimon/no_pools.yaml
new file mode 100644
index 00000000..37d50105
--- /dev/null
+++ b/qa/suites/rados/multimon/no_pools.yaml
@@ -0,0 +1,3 @@
+overrides:
+ ceph:
+ create_rbd_pool: false
diff --git a/qa/suites/rados/multimon/objectstore b/qa/suites/rados/multimon/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/multimon/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/multimon/rados.yaml b/qa/suites/rados/multimon/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/multimon/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/multimon/supported-random-distro$ b/qa/suites/rados/multimon/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/multimon/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/multimon/tasks/.qa b/qa/suites/rados/multimon/tasks/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
new file mode 100644
index 00000000..a4cea8f3
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml
@@ -0,0 +1,11 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - slow request
+ - .*clock.*skew.*
+ - clocks not synchronized
+ - overall HEALTH_
+ - \(MON_CLOCK_SKEW\)
+- mon_clock_skew_check:
+ expect-skew: false
diff --git a/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
new file mode 100644
index 00000000..abfc3a1c
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml
@@ -0,0 +1,24 @@
+tasks:
+- install:
+- exec:
+ mon.b:
+ - sudo systemctl stop chronyd.service || true
+ - sudo systemctl stop systemd-timesync.service || true
+ - sudo systemctl stop ntpd.service || true
+ - sudo systemctl stop ntp.service || true
+ - date -u -s @$(expr $(date -u +%s) + 2)
+- ceph:
+ wait-for-healthy: false
+ log-whitelist:
+ - .*clock.*skew.*
+ - clocks not synchronized
+ - overall HEALTH_
+ - \(MON_CLOCK_SKEW\)
+ - \(MGR_DOWN\)
+ - \(MON_DOWN\)
+ - \(PG_
+ - \(SLOW_OPS\)
+ - No standby daemons available
+ - slow request
+- mon_clock_skew_check:
+ expect-skew: true
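
Note: the exec block stops whichever time-synchronisation daemon happens to be installed (the "|| true" keeps missing units from failing the task) and then steps mon.b's clock forward by two seconds, so mon_clock_skew_check can assert that the monitors really report a skew. A larger offset would only change the arithmetic, as in this hypothetical variant (not part of the suite):

    - exec:
        mon.b:
          - sudo systemctl stop chronyd.service || true
          - date -u -s @$(expr $(date -u +%s) + 10)   # push the clock 10 s ahead instead of 2
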
diff --git a/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/qa/suites/rados/multimon/tasks/mon_recovery.yaml
new file mode 100644
index 00000000..14da275e
--- /dev/null
+++ b/qa/suites/rados/multimon/tasks/mon_recovery.yaml
@@ -0,0 +1,10 @@
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(PG_AVAILABILITY\)
+ - \(SLOW_OPS\)
+ - slow request
+- mon_recovery:
diff --git a/qa/suites/rados/objectstore/% b/qa/suites/rados/objectstore/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/objectstore/%
diff --git a/qa/suites/rados/objectstore/.qa b/qa/suites/rados/objectstore/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/objectstore/backends/.qa b/qa/suites/rados/objectstore/backends/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/objectstore/backends/alloc-hint.yaml b/qa/suites/rados/objectstore/backends/alloc-hint.yaml
new file mode 100644
index 00000000..047b02fa
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/alloc-hint.yaml
@@ -0,0 +1,22 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ filestore xfs extsize: true
+ osd objectstore: filestore
+
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_alloc_hint.sh
diff --git a/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml b/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml
new file mode 100644
index 00000000..042bd065
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml
@@ -0,0 +1,25 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 6
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ osd:
+ osd objectstore: filestore
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(TOO_FEW_PGS\)
+ - \(POOL_APP_NOT_ENABLED\)
+- ceph_objectstore_tool:
+ objects: 20
diff --git a/qa/suites/rados/objectstore/backends/filejournal.yaml b/qa/suites/rados/objectstore/backends/filejournal.yaml
new file mode 100644
index 00000000..b0af8008
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/filejournal.yaml
@@ -0,0 +1,13 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- exec:
+ client.0:
+ - ceph_test_filejournal
diff --git a/qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml b/qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml
new file mode 100644
index 00000000..58b5197d
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ global:
+ journal aio: true
+- filestore_idempotent:
diff --git a/qa/suites/rados/objectstore/backends/filestore-idempotent.yaml b/qa/suites/rados/objectstore/backends/filestore-idempotent.yaml
new file mode 100644
index 00000000..2d3f3c69
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/filestore-idempotent.yaml
@@ -0,0 +1,11 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- filestore_idempotent:
diff --git a/qa/suites/rados/objectstore/backends/fusestore.yaml b/qa/suites/rados/objectstore/backends/fusestore.yaml
new file mode 100644
index 00000000..1c34fcae
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/fusestore.yaml
@@ -0,0 +1,9 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - objectstore/test_fuse.sh
+
diff --git a/qa/suites/rados/objectstore/backends/keyvaluedb.yaml b/qa/suites/rados/objectstore/backends/keyvaluedb.yaml
new file mode 100644
index 00000000..efff8d37
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/keyvaluedb.yaml
@@ -0,0 +1,8 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb
+ - rm -rf $TESTDIR/kvtest
diff --git a/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml b/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml
new file mode 100644
index 00000000..e407a391
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- workunit:
+ clients:
+ all:
+ - osdc/stress_objectcacher.sh
diff --git a/qa/suites/rados/objectstore/backends/objectstore.yaml b/qa/suites/rados/objectstore/backends/objectstore.yaml
new file mode 100644
index 00000000..d68270a8
--- /dev/null
+++ b/qa/suites/rados/objectstore/backends/objectstore.yaml
@@ -0,0 +1,12 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+openstack:
+- volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- exec:
+ client.0:
+ - mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3 --gtest_catch_exceptions=0
+ - rm -rf $TESTDIR/archive/ostest
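
Note: this exec invocation creates a scratch directory under the archive, raises the soft open-file limit (the objectstore tests hold many files open), routes logging into the archive via CEPH_ARGS, and runs the googletest binary with --gtest_filter=-*/3 to skip the fourth parameterized store instantiation and --gtest_catch_exceptions=0 so crashes surface instead of being swallowed. Standard googletest flags can be used to inspect what would run (illustrative only):

    - exec:
        client.0:
          - ceph_test_objectstore --gtest_list_tests                       # enumerate cases without running them
          - ceph_test_objectstore --gtest_filter=-*/3 --gtest_list_tests   # same listing, minus the excluded instantiation
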
diff --git a/qa/suites/rados/objectstore/supported-random-distro$ b/qa/suites/rados/objectstore/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/objectstore/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/perf/% b/qa/suites/rados/perf/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/perf/%
diff --git a/qa/suites/rados/perf/.qa b/qa/suites/rados/perf/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/perf/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/perf/ceph.yaml b/qa/suites/rados/perf/ceph.yaml
new file mode 100644
index 00000000..912dcbdc
--- /dev/null
+++ b/qa/suites/rados/perf/ceph.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ wait-for-scrub: false
+ log-whitelist:
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+ - overall HEALTH
+- rgw: [client.0]
+- ssh_keys:
diff --git a/qa/suites/rados/perf/distros/ubuntu_16.04.yaml b/qa/suites/rados/perf/distros/ubuntu_16.04.yaml
new file mode 120000
index 00000000..a92e4060
--- /dev/null
+++ b/qa/suites/rados/perf/distros/ubuntu_16.04.yaml
@@ -0,0 +1 @@
+../../../../distros/supported-all-distro/ubuntu_16.04.yaml \ No newline at end of file
diff --git a/qa/suites/rados/perf/distros/ubuntu_latest.yaml b/qa/suites/rados/perf/distros/ubuntu_latest.yaml
new file mode 120000
index 00000000..f4d73c11
--- /dev/null
+++ b/qa/suites/rados/perf/distros/ubuntu_latest.yaml
@@ -0,0 +1 @@
+../../../../distros/supported-all-distro/ubuntu_latest.yaml \ No newline at end of file
diff --git a/qa/suites/rados/perf/objectstore b/qa/suites/rados/perf/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/perf/openstack.yaml b/qa/suites/rados/perf/openstack.yaml
new file mode 100644
index 00000000..f4d1349b
--- /dev/null
+++ b/qa/suites/rados/perf/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
diff --git a/qa/suites/rados/perf/settings/.qa b/qa/suites/rados/perf/settings/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/perf/settings/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/perf/settings/optimized.yaml b/qa/suites/rados/perf/settings/optimized.yaml
new file mode 100644
index 00000000..5ebcb3ae
--- /dev/null
+++ b/qa/suites/rados/perf/settings/optimized.yaml
@@ -0,0 +1,76 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ debug mon: "0/0"
+ debug ms: "0/0"
+ debug paxos: "0/0"
+ osd:
+ debug filestore: "0/0"
+ debug journal: "0/0"
+ debug ms: "0/0"
+ debug osd: "0/0"
+ global:
+ auth client required: none
+ auth cluster required: none
+ auth service required: none
+ auth supported: none
+
+ debug lockdep: "0/0"
+ debug context: "0/0"
+ debug crush: "0/0"
+ debug mds: "0/0"
+ debug mds balancer: "0/0"
+ debug mds locker: "0/0"
+ debug mds log: "0/0"
+ debug mds log expire: "0/0"
+ debug mds migrator: "0/0"
+ debug buffer: "0/0"
+ debug timer: "0/0"
+ debug filer: "0/0"
+ debug striper: "0/0"
+ debug objecter: "0/0"
+ debug rados: "0/0"
+ debug rbd: "0/0"
+ debug rbd mirror: "0/0"
+ debug rbd replay: "0/0"
+ debug journaler: "0/0"
+ debug objectcacher: "0/0"
+ debug client: "0/0"
+ debug osd: "0/0"
+ debug optracker: "0/0"
+ debug objclass: "0/0"
+ debug filestore: "0/0"
+ debug journal: "0/0"
+ debug ms: "0/0"
+ debug mon: "0/0"
+ debug monc: "0/0"
+ debug paxos: "0/0"
+ debug tp: "0/0"
+ debug auth: "0/0"
+ debug crypto: "0/0"
+ debug finisher: "0/0"
+ debug heartbeatmap: "0/0"
+ debug perfcounter: "0/0"
+ debug rgw: "0/0"
+ debug rgw sync: "0/0"
+ debug civetweb: "0/0"
+ debug javaclient: "0/0"
+ debug asok: "0/0"
+ debug throttle: "0/0"
+ debug refs: "0/0"
+ debug xio: "0/0"
+ debug compressor: "0/0"
+ debug bluestore: "0/0"
+ debug bluefs: "0/0"
+ debug bdev: "0/0"
+ debug kstore: "0/0"
+ debug rocksdb: "0/0"
+ debug leveldb: "0/0"
+ debug memdb: "0/0"
+ debug kinetic: "0/0"
+ debug fuse: "0/0"
+ debug mgr: "0/0"
+ debug mgrc: "0/0"
+ debug dpdk: "0/0"
+ debug eventtrace: "0/0"
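
Note: Ceph debug settings take the form "log-level/in-memory-level", so "0/0" silences both the on-disk log and the in-memory gather buffer for every listed subsystem; the values are quoted so YAML keeps them as literal strings. When a perf run needs diagnostics back for one subsystem, only that key has to be overridden, e.g. (a hedged example, not part of the suite):

    overrides:
      ceph:
        conf:
          global:
            debug ms: "1/5"   # restore messenger logging: level 1 to the log, level 5 kept in memory
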
diff --git a/qa/suites/rados/perf/workloads/.qa b/qa/suites/rados/perf/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml b/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml
new file mode 100644
index 00000000..e5bad129
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml
@@ -0,0 +1,26 @@
+overrides:
+ rgw:
+ data_pool_pg_size: 64
+ index_pool_pg_size: 64
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ cosbench:
+ obj_size: [64KB]
+ osd_ra: [4096]
+ workers: 1
+ containers_max: 1000
+ objects_max: 100
+ mode: [mix]
+ template: [default]
+ rampup: 30
+ runtime: 300
+ rampdown: 30
+ containers: ["u(1,100)"]
+ objects: ["u(1,100)"]
+ ratio: [60]
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 1
+ iterations: 1
diff --git a/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml b/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml
new file mode 100644
index 00000000..81973da2
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml
@@ -0,0 +1,26 @@
+overrides:
+ rgw:
+ data_pool_pg_size: 64
+ index_pool_pg_size: 64
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ cosbench:
+ obj_size: [64KB]
+ osd_ra: [4096]
+ workers: 1
+ containers_max: 1000
+ objects_max: 100
+ mode: [write]
+ template: [default]
+ rampup: 30
+ runtime: 300
+ rampdown: 30
+ containers: ["u(1,100)"]
+ objects: ["u(1,100)"]
+ ratio: [100]
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 1
+ iterations: 1
diff --git a/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml b/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml
new file mode 100644
index 00000000..61fdb5a9
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ librbdfio:
+ op_size: [4096]
+ time: 60
+ mode: ['randread']
+ norandommap: True
+ vol_size: 4096
+ procs_per_volume: [1]
+ volumes_per_client: [2]
+ iodepth: [32]
+ osd_ra: [4096]
+ pool_profile: 'rbd'
+ log_avg_msec: 100
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ rbd:
+ pg_size: 128
+ pgp_size: 128
+ replication: 3
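
Note: the cbt task hands this benchmark description to the Ceph Benchmarking Tool: librbdfio runs fio against RBD images, op_size is the I/O size in bytes, iodepth the queue depth, and pool_profiles describes the pool cbt creates (pg/pgp counts and replication). The sibling workload files vary only a couple of these knobs; a 64 KiB variant, for instance, would differ only in op_size (a hypothetical fragment, not one of the checked-in workloads):

    - cbt:
        branch: 'nautilus'
        benchmarks:
          librbdfio:
            op_size: [65536]        # 64 KiB random reads instead of 4 KiB
            mode: ['randread']
            iodepth: [32]
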
diff --git a/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml b/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml
new file mode 100644
index 00000000..9de8196b
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ librbdfio:
+ op_size: [4096]
+ time: 60
+ mode: ['randrw']
+ norandommap: True
+ vol_size: 4096
+ procs_per_volume: [1]
+ volumes_per_client: [2]
+ iodepth: [32]
+ osd_ra: [4096]
+ pool_profile: 'rbd'
+ log_avg_msec: 100
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ rbd:
+ pg_size: 128
+ pgp_size: 128
+ replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml
new file mode 100644
index 00000000..f64e9bb8
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ librbdfio:
+ op_size: [4194304]
+ time: 60
+ mode: ['randread']
+ norandommap: True
+ vol_size: 4096
+ procs_per_volume: [1]
+ volumes_per_client: [2]
+ iodepth: [32]
+ osd_ra: [4096]
+ pool_profile: 'rbd'
+ log_avg_msec: 100
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ rbd:
+ pg_size: 128
+ pgp_size: 128
+ replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml
new file mode 100644
index 00000000..369fb268
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ librbdfio:
+ op_size: [4194304]
+ time: 60
+ mode: ['randrw']
+ norandommap: True
+ vol_size: 4096
+ procs_per_volume: [1]
+ volumes_per_client: [2]
+ iodepth: [32]
+ osd_ra: [4096]
+ pool_profile: 'rbd'
+ log_avg_msec: 100
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ rbd:
+ pg_size: 128
+ pgp_size: 128
+ replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml
new file mode 100644
index 00000000..2ac8ef9e
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ librbdfio:
+ op_size: [4194304]
+ time: 60
+ mode: ['randwrite']
+ norandommap: True
+ vol_size: 4096
+ procs_per_volume: [1]
+ volumes_per_client: [2]
+ iodepth: [32]
+ osd_ra: [4096]
+ pool_profile: 'rbd'
+ log_avg_msec: 100
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ rbd:
+ pg_size: 128
+ pgp_size: 128
+ replication: 3
diff --git a/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml
new file mode 100644
index 00000000..420f1950
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ radosbench:
+ concurrent_ops: 4
+ concurrent_procs: 2
+ op_size: [4096]
+ pool_monitoring_list:
+ - collectl
+ pool_profile: 'replicated'
+ run_monitoring_list:
+ - collectl
+ time: 60
+ write_only: false
+ readmode: 'rand'
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 256
+ pgp_size: 256
+ replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml
new file mode 100644
index 00000000..e32cabaa
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ radosbench:
+ concurrent_ops: 4
+ concurrent_procs: 2
+ op_size: [4096]
+ pool_monitoring_list:
+ - collectl
+ pool_profile: 'replicated'
+ run_monitoring_list:
+ - collectl
+ time: 60
+ write_only: false
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 256
+ pgp_size: 256
+ replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml
new file mode 100644
index 00000000..27b2466a
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ radosbench:
+ concurrent_ops: 4
+ concurrent_procs: 2
+ op_size: [4194304]
+ pool_monitoring_list:
+ - collectl
+ pool_profile: 'replicated'
+ run_monitoring_list:
+ - collectl
+ time: 60
+ write_only: false
+ readmode: 'rand'
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 256
+ pgp_size: 256
+ replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml
new file mode 100644
index 00000000..a7107d85
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ radosbench:
+ concurrent_ops: 4
+ concurrent_procs: 2
+ op_size: [4194304]
+ pool_monitoring_list:
+ - collectl
+ pool_profile: 'replicated'
+ run_monitoring_list:
+ - collectl
+ time: 60
+ write_only: false
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 256
+ pgp_size: 256
+ replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml
new file mode 100644
index 00000000..0465e124
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ radosbench:
+ concurrent_ops: 4
+ concurrent_procs: 2
+ op_size: [4194304]
+ pool_monitoring_list:
+ - collectl
+ pool_profile: 'replicated'
+ run_monitoring_list:
+ - collectl
+ time: 60
+ write_only: true
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 256
+ pgp_size: 256
+ replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/sample_fio.yaml b/qa/suites/rados/perf/workloads/sample_fio.yaml
new file mode 100644
index 00000000..c4bb2422
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/sample_fio.yaml
@@ -0,0 +1,25 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ librbdfio:
+ op_size: [4096]
+ time: 60
+ mode: ['randwrite']
+ norandommap: True
+ vol_size: 4096
+ procs_per_volume: [1]
+ volumes_per_client: [2]
+ iodepth: [32]
+ osd_ra: [4096]
+ pool_profile: 'rbd'
+ log_avg_msec: 100
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ rbd:
+ pg_size: 128
+ pgp_size: 128
+ replication: 3
diff --git a/qa/suites/rados/perf/workloads/sample_radosbench.yaml b/qa/suites/rados/perf/workloads/sample_radosbench.yaml
new file mode 100644
index 00000000..9d9f0ac2
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/sample_radosbench.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+ branch: 'nautilus'
+ benchmarks:
+ radosbench:
+ concurrent_ops: 4
+ concurrent_procs: 2
+ op_size: [4096]
+ pool_monitoring_list:
+ - collectl
+ pool_profile: 'replicated'
+ run_monitoring_list:
+ - collectl
+ time: 60
+ write_only: true
+ cluster:
+ user: 'ubuntu'
+ osds_per_node: 3
+ iterations: 1
+ pool_profiles:
+ replicated:
+ pg_size: 256
+ pgp_size: 256
+ replication: 'replicated'
diff --git a/qa/suites/rados/rest/% b/qa/suites/rados/rest/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/rest/%
diff --git a/qa/suites/rados/rest/.qa b/qa/suites/rados/rest/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/rest/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/rest/mgr-restful.yaml b/qa/suites/rados/rest/mgr-restful.yaml
new file mode 100644
index 00000000..16653fc9
--- /dev/null
+++ b/qa/suites/rados/rest/mgr-restful.yaml
@@ -0,0 +1,29 @@
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MGR_DOWN\)
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+- exec:
+ mon.a:
+ - ceph restful create-key admin
+ - ceph restful create-self-signed-cert
+ - ceph restful restart
+- workunit:
+ clients:
+ client.a:
+ - rest/test-restful.sh
+- exec:
+ mon.a:
+ - ceph restful delete-key admin
+ - ceph restful list-keys | jq ".admin" | grep null
+
diff --git a/qa/suites/rados/rest/supported-random-distro$ b/qa/suites/rados/rest/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/rest/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/% b/qa/suites/rados/singleton-bluestore/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/%
diff --git a/qa/suites/rados/singleton-bluestore/.qa b/qa/suites/rados/singleton-bluestore/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/all/.qa b/qa/suites/rados/singleton-bluestore/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/all/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/all/cephtool.yaml b/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
new file mode 100644
index 00000000..0567b603
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - must scrub before tier agent can activate
+ - failsafe engaged, dropping updates
+ - failsafe disengaged, no longer dropping updates
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SMALLER_PG_NUM\)
+ - \(SMALLER_PGP_NUM\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(FS_WITH_FAILED_MDS\)
+ - \(FS_DEGRADED\)
+ - \(POOL_BACKFILLFULL\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(POOL_NEARFULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(AUTH_BAD_CAPS\)
+- workunit:
+ clients:
+ all:
+ - cephtool
+ - mon/pool_ops.sh
diff --git a/qa/suites/rados/singleton-bluestore/msgr b/qa/suites/rados/singleton-bluestore/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/.qa b/qa/suites/rados/singleton-bluestore/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml b/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml b/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
new file mode 100644
index 00000000..59ca5c0f
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/.qa b/qa/suites/rados/singleton-bluestore/objectstore/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml
new file mode 120000
index 00000000..a59cf517
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml
new file mode 120000
index 00000000..4fb2ff6c
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-comp-lz4.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml
new file mode 120000
index 00000000..888caf55
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-comp-snappy.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/rados.yaml b/qa/suites/rados/singleton-bluestore/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/supported-random-distro$ b/qa/suites/rados/singleton-bluestore/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-flat/.qa b/qa/suites/rados/singleton-flat/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-flat/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-flat/valgrind-leaks.yaml b/qa/suites/rados/singleton-flat/valgrind-leaks.yaml
new file mode 100644
index 00000000..d3180d9b
--- /dev/null
+++ b/qa/suites/rados/singleton-flat/valgrind-leaks.yaml
@@ -0,0 +1,36 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+os_version: '7.8'
+
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+
+overrides:
+ install:
+ ceph:
+ debuginfo: true
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_
+ conf:
+ global:
+ osd heartbeat grace: 40
+ debug deliberately leak memory: true
+ osd max object name len: 460
+ osd max object namespace len: 64
+ mon:
+ mon osd crush smoke test: false
+ osd:
+ osd fast shutdown: false
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+ expect_valgrind_errors: true
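
Note: this facet runs the daemons under valgrind memcheck, turns on "debug deliberately leak memory" so there is a known leak to find, and sets expect_valgrind_errors: true, meaning the job passes only if valgrind actually reports something; it is effectively a self-test of the valgrind plumbing. A minimal sketch of just the valgrind wiring, assuming the same override keys as above, would be:

    overrides:
      ceph:
        valgrind:
          mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
          osd: [--tool=memcheck]
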
diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/%
diff --git a/qa/suites/rados/singleton-nomsgr/.qa b/qa/suites/rados/singleton-nomsgr/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/.qa b/qa/suites/rados/singleton-nomsgr/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 00000000..49f06b9a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,24 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - MDS in read-only mode
+ - force file system read-only
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_FULL\)
+ - \(MDS_READ_ONLY\)
+ - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+- rgw:
+ - client.0
+- exec:
+ client.0:
+ - ceph_test_admin_socket_output --all
diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
new file mode 100644
index 00000000..75410508
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ fs: xfs
+- cram:
+ clients:
+ client.0:
+ - src/test/cli-integration/balancer/misplaced.t
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 00000000..0a4bc498
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,52 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ debug client: 20
+ debug mds: 20
+ debug ms: 1
+- exec:
+ client.0:
+ - ceph osd pool create data_cache 4
+ - ceph osd tier add cephfs_data data_cache
+ - ceph osd tier cache-mode data_cache writeback
+ - ceph osd tier set-overlay cephfs_data data_cache
+ - ceph osd pool set data_cache hit_set_type bloom
+ - ceph osd pool set data_cache hit_set_count 8
+ - ceph osd pool set data_cache hit_set_period 3600
+ - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+ client.0:
+ - sudo chmod 777 $TESTDIR/mnt.0/
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+ - ls -al $TESTDIR/mnt.0/foo
+ - truncate --size 0 $TESTDIR/mnt.0/foo
+ - ls -al $TESTDIR/mnt.0/foo
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+ - ls -al $TESTDIR/mnt.0/foo
+ - cp $TESTDIR/mnt.0/foo /tmp/foo
+ - sync
+ - rados -p data_cache ls -
+ - sleep 10
+ - rados -p data_cache ls -
+ - rados -p data_cache cache-flush-evict-all
+ - rados -p data_cache ls -
+ - sleep 1
+- exec:
+ client.1:
+ - hexdump -C /tmp/foo | head
+ - hexdump -C $TESTDIR/mnt.1/foo | head
+ - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
new file mode 100644
index 00000000..a386e74e
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
@@ -0,0 +1,21 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - cephtool/test_kvstore_tool.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 00000000..530dc42a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,12 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - post-file.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 00000000..e0887b85
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,38 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - ceph osd pool create base-pool 4
+ - ceph osd pool application enable base-pool rados
+ - ceph osd pool create cache-pool 4
+ - ceph osd tier add base-pool cache-pool
+ - ceph osd tier cache-mode cache-pool writeback
+ - ceph osd tier set-overlay base-pool cache-pool
+ - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+ - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+ - rbd snap create base-pool/bar@snap
+ - rados -p base-pool cache-flush-evict-all
+ - rbd export base-pool/bar $TESTDIR/bar
+ - rbd export base-pool/bar@snap $TESTDIR/snap
+ - cmp $TESTDIR/foo $TESTDIR/bar
+ - cmp $TESTDIR/foo $TESTDIR/snap
+ - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 00000000..944b2f71
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,38 @@
+# verify #13098 fix
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - is full
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - ceph osd pool create ec-ca 1 1
+ - ceph osd pool create ec 1 1 erasure default
+ - ceph osd pool application enable ec rados
+ - ceph osd tier add ec ec-ca
+ - ceph osd tier cache-mode ec-ca readproxy
+ - ceph osd tier set-overlay ec ec-ca
+ - ceph osd pool set ec-ca hit_set_type bloom
+ - ceph osd pool set-quota ec-ca max_bytes 20480000
+ - ceph osd pool set-quota ec max_bytes 20480000
+ - ceph osd pool set ec-ca target_max_bytes 20480000
+ - timeout 30 rados -p ec-ca bench 30 write || true
+ - ceph osd pool set-quota ec-ca max_bytes 0
+ - ceph osd pool set-quota ec max_bytes 0
diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 00000000..a28582fd
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+# we may land on ext4
+ osd max object name len: 400
+ osd max object namespace len: 64
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- workunit:
+ clients:
+ all:
+ - rados/test_health_warnings.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
new file mode 100644
index 00000000..62794b4b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
@@ -0,0 +1,27 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_FULL\)
+ - \(MDS_READ_ONLY\)
+ - large omap objects
+ - Large omap object found
+ - application not enabled
+ conf:
+ osd:
+ osd scrub backoff ratio: 0
+ osd deep scrub large omap object value sum threshold: 8800000
+ osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_large_omap_detection.py
diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
new file mode 100644
index 00000000..9fbdf0e0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
@@ -0,0 +1,16 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+- exec:
+ client.0:
+ - ceph_test_lazy_omap_stats
diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
new file mode 100644
index 00000000..2a96b94d
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
@@ -0,0 +1,22 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+ ceph:
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+ extra_packages:
+ deb:
+ - libradosstriper-dev
+ - librados-dev
+ - libradospp-dev
+ rpm:
+ - libradosstriper-devel
+ - librados-devel
+ - libradospp-devel
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_librados_build.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 00000000..98b50952
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+ client.0:
+ - ceph_test_async_driver
+ - ceph_test_msgr
+openstack:
+ - machine:
+ disk: 40 # GB
+ ram: 15000 # MB
+ cpus: 1
+ volumes: # attached to each instance
+ count: 0
+ size: 1 # GB
+overrides:
+ ceph:
+ conf:
+ client:
+ debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 00000000..9800b5dd
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,48 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+- - osd.3
+ - osd.4
+ - osd.5
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd min pg log entries: 25
+ osd max pg log entries: 100
+ osd max object name len: 460
+ osd max object namespace len: 64
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 64
+ - sudo ceph osd pool application enable foo rados
+ - rados -p foo bench 60 write -b 1024 --no-cleanup
+ - sudo ceph osd pool set foo size 3
+ - sudo ceph osd out 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - sudo ceph osd in 0 1
+- sleep:
+ duration: 60
+- exec:
+ client.0:
+ - sudo ceph osd pool set foo size 2
+- sleep:
+ duration: 300
diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 00000000..c30aebb5
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,13 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - rados/test_pool_access.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
new file mode 100644
index 00000000..ce0cbd9f
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
@@ -0,0 +1,58 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 20 # GB
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ conf:
+ osd:
+ osd recovery sleep: .1
+ osd objectstore: filestore
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(PG_
+ - overall HEALTH
+- exec:
+ osd.0:
+ - ceph osd pool create foo 32
+ - ceph osd pool application enable foo foo
+ - rados -p foo bench 30 write -b 4096 --no-cleanup
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.0]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - sleep 5
+ - rados -p foo bench 3 write -b 4096 --no-cleanup
+ - ceph osd unset noup
+ - sleep 10
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.1]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - ceph osd out 0
+ - sleep 10
+ - ceph osd unset noup
+- ceph.healthy:
+ wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced
+- exec:
+ osd.0:
+ - ceph osd in 0
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton-nomsgr/rados.yaml b/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/supported-random-distro$ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/% b/qa/suites/rados/singleton/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton/%
diff --git a/qa/suites/rados/singleton/.qa b/qa/suites/rados/singleton/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/.qa b/qa/suites/rados/singleton/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/all/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/admin-socket.yaml b/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 00000000..13af8131
--- /dev/null
+++ b/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - client.a
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+ osd.0:
+ version:
+ git_version:
+ help:
+ config show:
+ config help:
+ config set filestore_dump_file /tmp/foo:
+ perf dump:
+ perf schema:
+      get_heap_property tcmalloc.max_total_thread_cache_bytes:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/qa/suites/rados/singleton/all/deduptool.yaml b/qa/suites/rados/singleton/all/deduptool.yaml
new file mode 100644
index 00000000..f2c54f1a
--- /dev/null
+++ b/qa/suites/rados/singleton/all/deduptool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_dedup_tool.sh
diff --git a/qa/suites/rados/singleton/all/divergent_priors.yaml b/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 00000000..743d73d4
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/qa/suites/rados/singleton/all/divergent_priors2.yaml b/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 00000000..2da2c466
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 00000000..59085ffa
--- /dev/null
+++ b/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- dump_stuck:
diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 00000000..aeb4b278
--- /dev/null
+++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - slow request
+- ec_lost_unfound:
diff --git a/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
new file mode 100644
index 00000000..e8201ee0
--- /dev/null
+++ b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - erasure-code/encode-decode-non-regression.sh
diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 00000000..636cb944
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - slow request
+- rep_lost_unfound_delete:
diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 00000000..2f60db16
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - slow request
+- lost_unfound:
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644
index 00000000..b8a7feae
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 2
+ osd max pg per osd hard ratio : 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: True
+ pg_num: 2
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644
index 00000000..8ffc9a31
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 1
+ osd max pg per osd hard ratio : 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+ - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: True
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644
index 00000000..8da365dd
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 1
+ osd max pg per osd hard ratio : 1
+ log-whitelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+ - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: False
diff --git a/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
new file mode 100644
index 00000000..ae4a5d2e
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(AUTH_BAD_CAPS\)
+- workunit:
+ clients:
+ all:
+ - mon/auth_caps.sh
diff --git a/qa/suites/rados/singleton/all/mon-config-key-caps.yaml b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
new file mode 100644
index 00000000..0b0b95c5
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(AUTH_BAD_CAPS\)
+- workunit:
+ clients:
+ all:
+ - mon/test_config_key_caps.sh
diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 00000000..7bb4f650
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/test_mon_config_key.py
diff --git a/qa/suites/rados/singleton/all/mon-config.yaml b/qa/suites/rados/singleton/all/mon-config.yaml
new file mode 100644
index 00000000..2d9de8bb
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/config.sh
diff --git a/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
new file mode 100644
index 00000000..7f9dd495
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
@@ -0,0 +1,152 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - osd.8
+ - osd.9
+ - osd.10
+ - osd.11
+ - osd.12
+ - osd.13
+ - osd.14
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 1 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon memory target: 134217728 # reduced to 128_M
+ rocksdb cache size: 67108864 # reduced to 64_M
+ mon osd cache size: 100000
+ mon osd cache size min: 134217728
+ osd:
+ osd memory target: 1610612736 # reduced to 1.5_G
+ osd objectstore: bluestore
+ debug bluestore: 20
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+
+tasks:
+- install:
+ branch: wip-sseshasa2-testing-2019-07-30-1825 # change as appropriate
+- ceph:
+ create_rbd_pool: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+- interactive:
+- parallel:
+ - log-mon-rss
+ - stress-tasks
+ - benchload
+- exec:
+ client.0:
+ - "ceph_test_mon_memory_target 134217728" # mon memory target
+ - "ceph_test_mon_rss_usage 134217728"
+log-mon-rss:
+- background_exec:
+ client.0:
+ - while true
+ - do /usr/bin/ceph_test_log_rss_usage ceph-mon >> /var/log/ceph/ceph-mon-rss-usage.log
+ - sleep 300 # log rss usage every 5 mins. May be modified accordingly
+ - done
+- exec:
+ client.0:
+ - sleep 37860 # sum total of the radosbench test times below plus 60 secs
+benchload: # The total radosbench test below translates to 10.5 hrs
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+stress-tasks:
+- thrashosds:
+ op_delay: 1
+ bdev_inject_crash: 1
+ bdev_inject_crash_probability: .8
+ chance_down: 80
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 3
+ chance_thrash_pg_upmap_items: 3
+ min_in: 2
diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 00000000..5b374071
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 00000000..ed5b216b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 00000000..d937a8db
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,30 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(SLOW_OPS\)
+ - slow request
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd pg log trim min: 0
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 00000000..645034a4
--- /dev/null
+++ b/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size : 1
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- peer:
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
new file mode 100644
index 00000000..2784b7e3
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
@@ -0,0 +1,42 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+- - mon.b
+ - mon.c
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+- exec:
+ client.0:
+ - ceph progress off
+
+- workunit:
+ clients:
+ all:
+ - mon/pg_autoscaler.sh
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
new file mode 100644
index 00000000..72e18d52
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
@@ -0,0 +1,38 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+- - mon.b
+ - mon.c
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+- workunit:
+ clients:
+ all:
+ - mon/pg_autoscaler.sh
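
The two pg-autoscaler suites above differ only in whether "ceph progress off" is run before the workunit; the autoscaler under test is a mgr module that is enabled cluster-wide and then toggled per pool. A rough sketch of the kind of commands mon/pg_autoscaler.sh revolves around, written in the same exec-task style these suites already use (the pool name and PG count are illustrative, not taken from this commit):

- exec:
    client.0:
      - sudo ceph mgr module enable pg_autoscaler
      - sudo ceph osd pool create foo 4
      - sudo ceph osd pool set foo pg_autoscale_mode on
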
diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 00000000..3ada5518
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 128 128
+ - sudo ceph osd pool application enable foo rados
+ - sleep 5
+ - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+ - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+ client.0:
+ - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 00000000..18277953
--- /dev/null
+++ b/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_rados_tool.sh
diff --git a/qa/suites/rados/singleton/all/random-eio.yaml b/qa/suites/rados/singleton/all/random-eio.yaml
new file mode 100644
index 00000000..5df910b8
--- /dev/null
+++ b/qa/suites/rados/singleton/all/random-eio.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - had a read error
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_DEGRADED\)
+ - \(OSD_TOO_MANY_REPAIRS\)
+- full_sequential:
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+ - sudo ceph osd pool create test 16 16
+ - sudo ceph osd pool set test size 3
+ - sudo ceph pg dump pgs --format=json-pretty
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ type: rand
+ objectsize: 1048576
+ pool: test
+ create_pool: false
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
new file mode 100644
index 00000000..cc1c6809
--- /dev/null
+++ b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - no reply from
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 30
+ - rebuild_mondb:
+ - radosbench:
+ clients: [client.0]
+ time: 30
diff --git a/qa/suites/rados/singleton/all/recovery-preemption.yaml b/qa/suites/rados/singleton/all/recovery-preemption.yaml
new file mode 100644
index 00000000..fbf1772c
--- /dev/null
+++ b/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -0,0 +1,57 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 20 # GB
+tasks:
+- install:
+- ceph:
+ conf:
+ osd:
+ osd recovery sleep: .1
+ osd min pg log entries: 10
+ osd max pg log entries: 1000
+ osd pg log trim min: 10
+ log-whitelist:
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(PG_
+ - \(SLOW_OPS\)
+ - overall HEALTH
+ - slow request
+- exec:
+ osd.0:
+ - ceph osd pool create foo 128
+ - ceph osd pool application enable foo foo
+ - sleep 5
+- ceph.healthy:
+- exec:
+ osd.0:
+ - rados -p foo bench 30 write -b 4096 --no-cleanup
+ - ceph osd out 0
+ - sleep 5
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.1]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - rados -p foo bench 3 write -b 4096 --no-cleanup
+ - ceph osd unset noup
+ - sleep 10
+ - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
+- ceph.healthy:
+- exec:
+ osd.0:
+ - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
diff --git a/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
new file mode 100644
index 00000000..3eddce82
--- /dev/null
+++ b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -0,0 +1,17 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/qa/suites/rados/singleton/all/test-crash.yaml b/qa/suites/rados/singleton/all/test-crash.yaml
new file mode 100644
index 00000000..8002deaa
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test-crash.yaml
@@ -0,0 +1,15 @@
+roles:
+ - [client.0, mon.a, mgr.x, osd.0, osd.1, osd.2]
+
+tasks:
+ - install:
+ - ceph:
+ log-whitelist:
+ - Reduced data availability
+ - OSD_.*DOWN
+ - \(RECENT_CRASH\)
+ - workunit:
+ clients:
+ client.0:
+ - rados/test_crash.sh
+ - ceph.restart: [osd.*]
diff --git a/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
new file mode 100644
index 00000000..42c8ae39
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ fs: ext4
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_envlibrados_for_rocksdb.sh
diff --git a/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
new file mode 100644
index 00000000..5cd32bd5
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
@@ -0,0 +1,50 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      mon:
+        osd pool default size: 3
+ osd min pg log entries: 5
+ osd max pg log entries: 10
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(SLOW_OPS\)
+ - \(PG_
+ - \(OBJECT_MISPLACED\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(TOO_FEW_PGS\)
+ - \(POOL_BACKFILLFULL\)
+ - slow request
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .75
+ min_live: 5
+ min_in: 5
+ chance_test_backfill_full: .5
+- radosbench:
+ clients: [client.0]
+ time: 1800
+ type: rand
+ objectsize: 1048576
diff --git a/qa/suites/rados/singleton/all/thrash-eio.yaml b/qa/suites/rados/singleton/all/thrash-eio.yaml
new file mode 100644
index 00000000..0afb6c86
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -0,0 +1,47 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      mon:
+        osd pool default size: 3
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(SLOW_OPS\)
+ - \(PG_
+ - \(OBJECT_MISPLACED\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(TOO_FEW_PGS\)
+ - slow request
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+ random_eio: .33
+ min_live: 5
+ min_in: 5
+- radosbench:
+ clients: [client.0]
+ time: 720
+ type: rand
+ objectsize: 1048576
diff --git a/qa/suites/rados/singleton/all/thrash-rados/+ b/qa/suites/rados/singleton/all/thrash-rados/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/+
diff --git a/qa/suites/rados/singleton/all/thrash-rados/.qa b/qa/suites/rados/singleton/all/thrash-rados/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644
index 00000000..37be8df9
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 00000000..c0b27075
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,70 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- install:
+- ceph:
+ log-whitelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(CACHE_POOL_
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+ mon.a:
+ - while true
+ - do sleep 30
+ - echo proxy
+ - sudo ceph osd tier cache-mode cache proxy
+ - sleep 10
+ - sudo ceph osd pool set cache cache_target_full_ratio .001
+ - echo cache-try-flush-evict-all
+ - rados -p cache cache-try-flush-evict-all
+ - sleep 5
+ - echo cache-flush-evict-all
+ - rados -p cache cache-flush-evict-all
+ - sleep 5
+ - echo remove overlay
+ - sudo ceph osd tier remove-overlay base
+ - sleep 20
+ - echo add writeback overlay
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd pool set cache cache_target_full_ratio .8
+ - sudo ceph osd tier set-overlay base cache
+ - sleep 30
+ - sudo ceph osd tier cache-mode cache readproxy
+ - done
+- rados:
+ clients: [client.0]
+ pools: [base]
+ max_seconds: 600
+ ops: 400000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 00000000..48ef78ff
--- /dev/null
+++ b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ config:
+ global:
+ osd pool default min size : 1
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ log-whitelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+ clients: [client.0]
diff --git a/qa/suites/rados/singleton/msgr b/qa/suites/rados/singleton/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr-failures/.qa b/qa/suites/rados/singleton/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr-failures/few.yaml b/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton/msgr-failures/many.yaml b/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 00000000..20aeb4df
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1000
+ mon mgr beacon grace: 90
+ mon client hunt interval max multiple: 2
+ mgr:
+ debug monc: 10
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton/objectstore b/qa/suites/rados/singleton/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/singleton/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/singleton/rados.yaml b/qa/suites/rados/singleton/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton/supported-random-distro$ b/qa/suites/rados/singleton/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/standalone/% b/qa/suites/rados/standalone/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/standalone/%
diff --git a/qa/suites/rados/standalone/.qa b/qa/suites/rados/standalone/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/standalone/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/standalone/supported-random-distro$ b/qa/suites/rados/standalone/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/standalone/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/standalone/workloads/.qa b/qa/suites/rados/standalone/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/standalone/workloads/crush.yaml b/qa/suites/rados/standalone/workloads/crush.yaml
new file mode 100644
index 00000000..a62a0dd8
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/crush.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - crush
diff --git a/qa/suites/rados/standalone/workloads/erasure-code.yaml b/qa/suites/rados/standalone/workloads/erasure-code.yaml
new file mode 100644
index 00000000..7d79753c
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/erasure-code.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - erasure-code
diff --git a/qa/suites/rados/standalone/workloads/mgr.yaml b/qa/suites/rados/standalone/workloads/mgr.yaml
new file mode 100644
index 00000000..997fae86
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/mgr.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - mgr
diff --git a/qa/suites/rados/standalone/workloads/misc.yaml b/qa/suites/rados/standalone/workloads/misc.yaml
new file mode 100644
index 00000000..4aa9ee27
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/misc.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - misc
diff --git a/qa/suites/rados/standalone/workloads/mon.yaml b/qa/suites/rados/standalone/workloads/mon.yaml
new file mode 100644
index 00000000..c19606f4
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/mon.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - mon
diff --git a/qa/suites/rados/standalone/workloads/osd.yaml b/qa/suites/rados/standalone/workloads/osd.yaml
new file mode 100644
index 00000000..e28b5221
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/osd.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - osd
diff --git a/qa/suites/rados/standalone/workloads/scrub.yaml b/qa/suites/rados/standalone/workloads/scrub.yaml
new file mode 100644
index 00000000..7f6fad40
--- /dev/null
+++ b/qa/suites/rados/standalone/workloads/scrub.yaml
@@ -0,0 +1,18 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ basedir: qa/standalone
+ clients:
+ all:
+ - scrub
diff --git a/qa/suites/rados/thrash-erasure-code-big/% b/qa/suites/rados/thrash-erasure-code-big/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/%
diff --git a/qa/suites/rados/thrash-erasure-code-big/.qa b/qa/suites/rados/thrash-erasure-code-big/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/ceph.yaml b/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
new file mode 120000
index 00000000..a2fd139c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/+ b/qa/suites/rados/thrash-erasure-code-big/cluster/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/cluster/+
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/.qa b/qa/suites/rados/thrash-erasure-code-big/cluster/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/cluster/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml b/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
new file mode 100644
index 00000000..1c45ee35
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml
@@ -0,0 +1,4 @@
+roles:
+- [osd.0, osd.1, osd.2, osd.3, client.0, mon.a]
+- [osd.4, osd.5, osd.6, osd.7, mon.b, mgr.x]
+- [osd.8, osd.9, osd.10, osd.11, mon.c]
diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml b/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
new file mode 100644
index 00000000..e559d912
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/qa/suites/rados/thrash-erasure-code-big/msgr-failures b/qa/suites/rados/thrash-erasure-code-big/msgr-failures
new file mode 120000
index 00000000..03689aa4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/objectstore b/qa/suites/rados/thrash-erasure-code-big/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/rados.yaml b/qa/suites/rados/thrash-erasure-code-big/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/recovery-overrides b/qa/suites/rados/thrash-erasure-code-big/recovery-overrides
new file mode 120000
index 00000000..1957f2c4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/recovery-overrides
@@ -0,0 +1 @@
+../thrash/2-recovery-overrides \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-big/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa b/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml
new file mode 100644
index 00000000..42694359
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
+ aggressive_pg_num_changes: false
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
new file mode 100644
index 00000000..13ca050f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
new file mode 100644
index 00000000..17087078
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ mon:
+ osd pool default ec fast read: true
+ osd:
+ osd debug reject backfill probability: .1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
new file mode 100644
index 00000000..fb3af982
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ chance_test_map_discontinuity: 0.5
+ min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
new file mode 100644
index 00000000..572832d8
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
new file mode 100644
index 00000000..148d9fe5
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml
@@ -0,0 +1,15 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/.qa b/qa/suites/rados/thrash-erasure-code-big/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/workloads/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
new file mode 120000
index 00000000..c18bec16
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
new file mode 120000
index 00000000..d66fd796
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/% b/qa/suites/rados/thrash-erasure-code-isa/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/%
diff --git a/qa/suites/rados/thrash-erasure-code-isa/.qa b/qa/suites/rados/thrash-erasure-code-isa/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/arch/.qa b/qa/suites/rados/thrash-erasure-code-isa/arch/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/arch/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml b/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
new file mode 100644
index 00000000..c2409f5d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml
@@ -0,0 +1 @@
+arch: x86_64
diff --git a/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml b/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
new file mode 120000
index 00000000..a2fd139c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/clusters b/qa/suites/rados/thrash-erasure-code-isa/clusters
new file mode 120000
index 00000000..7aac47be
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/clusters
@@ -0,0 +1 @@
+../thrash/clusters \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/msgr-failures b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures
new file mode 120000
index 00000000..03689aa4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/objectstore b/qa/suites/rados/thrash-erasure-code-isa/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/rados.yaml b/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/recovery-overrides b/qa/suites/rados/thrash-erasure-code-isa/recovery-overrides
new file mode 120000
index 00000000..1957f2c4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/recovery-overrides
@@ -0,0 +1 @@
+../thrash/2-recovery-overrides \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashers b/qa/suites/rados/thrash-erasure-code-isa/thrashers
new file mode 120000
index 00000000..f461dadc
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/thrashers
@@ -0,0 +1 @@
+../thrash/thrashers \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa b/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 120000
index 00000000..19342b9d
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/% b/qa/suites/rados/thrash-erasure-code-overwrites/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/%
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/.qa b/qa/suites/rados/thrash-erasure-code-overwrites/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml
new file mode 120000
index 00000000..635085f7
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/objectstore/bluestore-bitmap.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
new file mode 120000
index 00000000..a2fd139c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/clusters b/qa/suites/rados/thrash-erasure-code-overwrites/clusters
new file mode 120000
index 00000000..646ea04c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/clusters
@@ -0,0 +1 @@
+../thrash-erasure-code/clusters \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/fast b/qa/suites/rados/thrash-erasure-code-overwrites/fast
new file mode 120000
index 00000000..6170b30e
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/fast
@@ -0,0 +1 @@
+../thrash-erasure-code/fast \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures b/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures
new file mode 120000
index 00000000..70c9ca13
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures
@@ -0,0 +1 @@
+../thrash-erasure-code/msgr-failures \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
new file mode 120000
index 00000000..017df6f6
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml
@@ -0,0 +1 @@
+../thrash-erasure-code/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides b/qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides
new file mode 120000
index 00000000..1957f2c4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides
@@ -0,0 +1 @@
+../thrash/2-recovery-overrides \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/thrashers b/qa/suites/rados/thrash-erasure-code-overwrites/thrashers
new file mode 120000
index 00000000..40ff82cf
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/thrashers
@@ -0,0 +1 @@
+../thrash-erasure-code/thrashers \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
new file mode 100644
index 00000000..d2ad70a5
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml
@@ -0,0 +1,23 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
new file mode 100644
index 00000000..b3f831b7
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml
@@ -0,0 +1,29 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ fast_read: true
+ op_weights:
+ read: 100
+ write: 100
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
new file mode 100644
index 00000000..9baacef4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml
@@ -0,0 +1,28 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
new file mode 100644
index 00000000..b7c53819
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: '*'
+ thrashosds:
+ disable_objectstore_tool_tests: true
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ erasure_code_use_overwrites: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash-erasure-code-shec/% b/qa/suites/rados/thrash-erasure-code-shec/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/%
diff --git a/qa/suites/rados/thrash-erasure-code-shec/.qa b/qa/suites/rados/thrash-erasure-code-shec/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml b/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
new file mode 120000
index 00000000..a2fd139c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml
@@ -0,0 +1 @@
+../thrash/ceph.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/+ b/qa/suites/rados/thrash-erasure-code-shec/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/+
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa b/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml b/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
new file mode 120000
index 00000000..aa883007
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml
@@ -0,0 +1 @@
+.qa/clusters/fixed-4.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml b/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
new file mode 100644
index 00000000..e559d912
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/qa/suites/rados/thrash-erasure-code-shec/msgr-failures b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures
new file mode 120000
index 00000000..03689aa4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/objectstore b/qa/suites/rados/thrash-erasure-code-shec/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/objectstore
@@ -0,0 +1 @@
+.qa/objectstore
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/rados.yaml b/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/recovery-overrides b/qa/suites/rados/thrash-erasure-code-shec/recovery-overrides
new file mode 120000
index 00000000..1957f2c4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/recovery-overrides
@@ -0,0 +1 @@
+../thrash/2-recovery-overrides
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa b/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml
new file mode 100644
index 00000000..6f2f7a44
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
+ aggressive_pg_num_changes: false
diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
new file mode 100644
index 00000000..a438f43f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - slow request
+ conf:
+ osd:
+ osd debug reject backfill probability: .1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 8
diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa b/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
new file mode 120000
index 00000000..8f318cc3
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/% b/qa/suites/rados/thrash-erasure-code/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/%
diff --git a/qa/suites/rados/thrash-erasure-code/.qa b/qa/suites/rados/thrash-erasure-code/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/ceph.yaml b/qa/suites/rados/thrash-erasure-code/ceph.yaml
new file mode 100644
index 00000000..2030acb9
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/thrash-erasure-code/clusters b/qa/suites/rados/thrash-erasure-code/clusters
new file mode 120000
index 00000000..7aac47be
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/clusters
@@ -0,0 +1 @@
+../thrash/clusters
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/fast/.qa b/qa/suites/rados/thrash-erasure-code/fast/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/fast/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/fast/fast.yaml b/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
new file mode 100644
index 00000000..8ebfee0a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/fast/fast.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ osd pool default ec fast read: true
diff --git a/qa/suites/rados/thrash-erasure-code/fast/normal.yaml b/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/fast/normal.yaml
diff --git a/qa/suites/rados/thrash-erasure-code/msgr-failures b/qa/suites/rados/thrash-erasure-code/msgr-failures
new file mode 120000
index 00000000..03689aa4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/msgr-failures
@@ -0,0 +1 @@
+../thrash/msgr-failures
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/objectstore b/qa/suites/rados/thrash-erasure-code/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/objectstore
@@ -0,0 +1 @@
+.qa/objectstore
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/rados.yaml b/qa/suites/rados/thrash-erasure-code/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/recovery-overrides b/qa/suites/rados/thrash-erasure-code/recovery-overrides
new file mode 120000
index 00000000..1957f2c4
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/recovery-overrides
@@ -0,0 +1 @@
+../thrash/2-recovery-overrides
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/.qa b/qa/suites/rados/thrash-erasure-code/thrashers/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml
new file mode 100644
index 00000000..018267f0
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml
@@ -0,0 +1,19 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
+ aggressive_pg_num_changes: false
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
new file mode 100644
index 00000000..31c19704
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml
@@ -0,0 +1,18 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
new file mode 100644
index 00000000..4701fae5
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ mon:
+ osd pool default ec fast read: true
+ osd:
+ osd debug reject backfill probability: .1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
new file mode 100644
index 00000000..12c11fa3
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
new file mode 100644
index 00000000..2bbe5e5f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml
@@ -0,0 +1,16 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 4
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
+ min_in: 4
diff --git a/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/.qa b/qa/suites/rados/thrash-erasure-code/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml
new file mode 120000
index 00000000..08155ed6
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 120000
index 00000000..af6d8042
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 120000
index 00000000..cdf55199
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1 @@
+.qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
new file mode 100644
index 00000000..3c2ff7af
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml
@@ -0,0 +1,27 @@
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
+ - radosbench:
+ clients: [client.0]
+ time: 150
+ unique_pool: true
+ ec_pool: true
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
new file mode 100644
index 00000000..e732ec6f
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml
@@ -0,0 +1,21 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ fast_read: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml
new file mode 100644
index 00000000..25b38e14
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml
@@ -0,0 +1,14 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 8
+ objects: 20
+ size: 16384
+ ec_pool: true
+ op_weights:
+ write: 0
+ read: 0
+ append: 10
+ delete: 20
diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
new file mode 100644
index 00000000..a8ac3971
--- /dev/null
+++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml
@@ -0,0 +1,20 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ ec_pool: true
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/suites/rados/thrash-old-clients/% b/qa/suites/rados/thrash-old-clients/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/%
diff --git a/qa/suites/rados/thrash-old-clients/.qa b/qa/suites/rados/thrash-old-clients/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml
new file mode 120000
index 00000000..5393a755
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml
@@ -0,0 +1 @@
+.qa/overrides/2-size-2-min-size.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml
new file mode 120000
index 00000000..5ff70ead
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml
@@ -0,0 +1 @@
+.qa/overrides/3-size-2-min-size.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/1-install/.qa b/qa/suites/rados/thrash-old-clients/1-install/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/1-install/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml b/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml
new file mode 100644
index 00000000..ed620c68
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml
@@ -0,0 +1,29 @@
+overrides:
+ ceph:
+ crush_tunables: hammer
+ conf:
+ mon:
+ mon osd initial require min compat client: hammer
+ client:
+ ms type: simple
+tasks:
+- install:
+ branch: hammer
+ downgrade_packages: ['librbd1', 'librados2']
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-diskprediction-cloud
+ - ceph-mgr-rook
+ - ceph-mgr-ssh
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ - libradospp-devel
+ extra_packages: ['librados2']
+- install.upgrade:
+ mon.a:
+ mon.b:
+ mon.c:
diff --git a/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml b/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml
new file mode 100644
index 00000000..eae5ffc2
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml
@@ -0,0 +1,19 @@
+tasks:
+- install:
+ branch: jewel
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-diskprediction-cloud
+ - ceph-mgr-rook
+ - ceph-mgr-ssh
+ - ceph-mgr
+ - libcephfs2
+ - libcephfs-devel
+ - libcephfs-dev
+ extra_packages: ['librados2']
+- install.upgrade:
+ mon.a:
+ mon.b:
+ mon.c:
diff --git a/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml b/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml
new file mode 100644
index 00000000..eb9a3a2e
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml
@@ -0,0 +1,15 @@
+tasks:
+- install:
+ branch: luminous
+ exclude_packages:
+ - librados3
+ - ceph-mgr-dashboard
+ - ceph-mgr-diskprediction-local
+ - ceph-mgr-diskprediction-cloud
+ - ceph-mgr-rook
+ - ceph-mgr-ssh
+ extra_packages: ['librados2']
+- install.upgrade:
+ mon.a:
+ mon.b:
+ mon.c:
diff --git a/qa/suites/rados/thrash-old-clients/backoff/.qa b/qa/suites/rados/thrash-old-clients/backoff/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/backoff/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/backoff/normal.yaml b/qa/suites/rados/thrash-old-clients/backoff/normal.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/backoff/normal.yaml
diff --git a/qa/suites/rados/thrash-old-clients/backoff/peering.yaml b/qa/suites/rados/thrash-old-clients/backoff/peering.yaml
new file mode 100644
index 00000000..66d06117
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/backoff/peering.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
diff --git a/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml b/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml
new file mode 100644
index 00000000..e6109906
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
+ osd backoff on degraded: true
diff --git a/qa/suites/rados/thrash-old-clients/ceph.yaml b/qa/suites/rados/thrash-old-clients/ceph.yaml
new file mode 100644
index 00000000..364f9d03
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/ceph.yaml
@@ -0,0 +1,7 @@
+tasks:
+- ceph:
+ mon_bind_addrvec: false
+ mon_bind_msgr2: false
+ conf:
+ global:
+ ms bind msgr2: false
diff --git a/qa/suites/rados/thrash-old-clients/clusters/+ b/qa/suites/rados/thrash-old-clients/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/clusters/+
diff --git a/qa/suites/rados/thrash-old-clients/clusters/.qa b/qa/suites/rados/thrash-old-clients/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml b/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml
new file mode 100644
index 00000000..b0f3b9b4
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml b/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml
new file mode 100644
index 00000000..35cfc3c1
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0]
+- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1]
+- [mon.c, osd.8, osd.9, osd.10, osd.11]
+- [client.2]
+openstack:
+- volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd shutdown pgref assert: true
diff --git a/qa/suites/rados/thrash-old-clients/d-balancer/.qa b/qa/suites/rados/thrash-old-clients/d-balancer/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/d-balancer/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml b/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml
new file mode 100644
index 00000000..aa867660
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode crush-compat
+ - ceph balancer on
diff --git a/qa/suites/rados/thrash-old-clients/d-balancer/off.yaml b/qa/suites/rados/thrash-old-clients/d-balancer/off.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/d-balancer/off.yaml
diff --git a/qa/suites/rados/thrash-old-clients/distro$/.qa b/qa/suites/rados/thrash-old-clients/distro$/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/distro$/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml b/qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml
new file mode 120000
index 00000000..bd9854e7
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml
@@ -0,0 +1 @@
+.qa/distros/supported/centos_latest.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml b/qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml
new file mode 120000
index 00000000..053d801e
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml
@@ -0,0 +1 @@
+.qa/distros/all/ubuntu_16.04.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/.qa b/qa/suites/rados/thrash-old-clients/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml
new file mode 100644
index 00000000..02121726
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml b/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml
new file mode 100644
index 00000000..527eadb4
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ osd:
+ osd heartbeat use min delay socket: true
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml
new file mode 100644
index 00000000..91c14725
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/thrash-old-clients/msgr/.qa b/qa/suites/rados/thrash-old-clients/msgr/.qa
new file mode 120000
index 00000000..fea2489f
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr/.qa
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml b/qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml
new file mode 120000
index 00000000..9673dbb0
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml
@@ -0,0 +1 @@
+.qa/msgr/async-v1only.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/msgr/async.yaml b/qa/suites/rados/thrash-old-clients/msgr/async.yaml
new file mode 120000
index 00000000..b7f05e4c
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr/async.yaml
@@ -0,0 +1 @@
+.qa/msgr/async.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/msgr/random.yaml b/qa/suites/rados/thrash-old-clients/msgr/random.yaml
new file mode 120000
index 00000000..e0dcb145
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr/random.yaml
@@ -0,0 +1 @@
+.qa/msgr/random.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/msgr/simple.yaml b/qa/suites/rados/thrash-old-clients/msgr/simple.yaml
new file mode 120000
index 00000000..780dc0ea
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/msgr/simple.yaml
@@ -0,0 +1 @@
+.qa/msgr/simple.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/rados.yaml b/qa/suites/rados/thrash-old-clients/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/.qa b/qa/suites/rados/thrash-old-clients/thrashers/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml b/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml
new file mode 100644
index 00000000..df77f73a
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml
@@ -0,0 +1,25 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ aggressive_pg_num_changes: false
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/default.yaml b/qa/suites/rados/thrash-old-clients/thrashers/default.yaml
new file mode 100644
index 00000000..e8e2007f
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/default.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml b/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml
new file mode 100644
index 00000000..7b55097f
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml
@@ -0,0 +1,26 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd scrub during recovery: false
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 0.25
+ chance_pgpnum_fix: 0.25
+ chance_test_map_discontinuity: 2
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml
new file mode 100644
index 00000000..91d2173e
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ journal throttle high multiple: 2
+ journal throttle max multiple: 10
+ filestore queue throttle high multiple: 2
+ filestore queue throttle max multiple: 10
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+openstack:
+- volumes:
+ size: 50
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/none.yaml b/qa/suites/rados/thrash-old-clients/thrashers/none.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/none.yaml
diff --git a/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml b/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml
new file mode 100644
index 00000000..8721fd18
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ filestore odsync write: true
+ osd max backfills: 2
+ osd snap trim sleep: .5
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml b/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/workloads/.qa b/qa/suites/rados/thrash-old-clients/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml b/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml
new file mode 100644
index 00000000..fc1f5b45
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml
@@ -0,0 +1,34 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.2]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml b/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml
new file mode 100644
index 00000000..d0022fef
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml
@@ -0,0 +1,41 @@
+overrides:
+ ceph:
+ conf:
+ client.2:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
+ - radosbench:
+ objectsize: 0
+ clients: [client.2]
+ time: 90
diff --git a/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml b/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml
new file mode 100644
index 00000000..31ccad9f
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml
@@ -0,0 +1,7 @@
+meta:
+- desc: |
+ rbd object class functional tests
+tasks:
+- exec:
+ client.2:
+ - ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents
diff --git a/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml b/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml
new file mode 100644
index 00000000..f0a5735a
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.2]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml b/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml
new file mode 100644
index 00000000..39617b37
--- /dev/null
+++ b/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+workload:
+- workunit:
+ clients:
+ client.2:
+ - rbd/test_librbd.sh
diff --git a/qa/suites/rados/thrash/% b/qa/suites/rados/thrash/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/%
diff --git a/qa/suites/rados/thrash/.qa b/qa/suites/rados/thrash/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/.qa b/qa/suites/rados/thrash/0-size-min-size-overrides/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/0-size-min-size-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
new file mode 120000
index 00000000..5393a755
--- /dev/null
+++ b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml
@@ -0,0 +1 @@
+.qa/overrides/2-size-2-min-size.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
new file mode 120000
index 00000000..5ff70ead
--- /dev/null
+++ b/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml
@@ -0,0 +1 @@
+.qa/overrides/3-size-2-min-size.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/.qa b/qa/suites/rados/thrash/1-pg-log-overrides/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/1-pg-log-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml
diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml b/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
new file mode 120000
index 00000000..abd86d7d
--- /dev/null
+++ b/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1 @@
+.qa/overrides/short_pg_log.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/2-recovery-overrides/$ b/qa/suites/rados/thrash/2-recovery-overrides/$
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/2-recovery-overrides/$
diff --git a/qa/suites/rados/thrash/2-recovery-overrides/.qa b/qa/suites/rados/thrash/2-recovery-overrides/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/2-recovery-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/2-recovery-overrides/default.yaml b/qa/suites/rados/thrash/2-recovery-overrides/default.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/2-recovery-overrides/default.yaml
diff --git a/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml b/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml
new file mode 120000
index 00000000..47afd702
--- /dev/null
+++ b/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml
@@ -0,0 +1 @@
+.qa/overrides/more-active-recovery.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/backoff/.qa b/qa/suites/rados/thrash/backoff/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/backoff/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/backoff/normal.yaml b/qa/suites/rados/thrash/backoff/normal.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/backoff/normal.yaml
diff --git a/qa/suites/rados/thrash/backoff/peering.yaml b/qa/suites/rados/thrash/backoff/peering.yaml
new file mode 100644
index 00000000..66d06117
--- /dev/null
+++ b/qa/suites/rados/thrash/backoff/peering.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
diff --git a/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml b/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml
new file mode 100644
index 00000000..e6109906
--- /dev/null
+++ b/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd backoff on peering: true
+ osd backoff on degraded: true
diff --git a/qa/suites/rados/thrash/ceph.yaml b/qa/suites/rados/thrash/ceph.yaml
new file mode 100644
index 00000000..2030acb9
--- /dev/null
+++ b/qa/suites/rados/thrash/ceph.yaml
@@ -0,0 +1,3 @@
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/thrash/clusters/+ b/qa/suites/rados/thrash/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/clusters/+
diff --git a/qa/suites/rados/thrash/clusters/.qa b/qa/suites/rados/thrash/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/clusters/fixed-2.yaml b/qa/suites/rados/thrash/clusters/fixed-2.yaml
new file mode 120000
index 00000000..230ff0fd
--- /dev/null
+++ b/qa/suites/rados/thrash/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+.qa/clusters/fixed-2.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/clusters/openstack.yaml b/qa/suites/rados/thrash/clusters/openstack.yaml
new file mode 100644
index 00000000..b0f3b9b4
--- /dev/null
+++ b/qa/suites/rados/thrash/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml b/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml
new file mode 100644
index 00000000..1e04fb36
--- /dev/null
+++ b/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd inject bad map crc probability: 0.1
+ log-whitelist:
+ - failed to encode map
diff --git a/qa/suites/rados/thrash/crc-failures/default.yaml b/qa/suites/rados/thrash/crc-failures/default.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/crc-failures/default.yaml
diff --git a/qa/suites/rados/thrash/d-balancer/.qa b/qa/suites/rados/thrash/d-balancer/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/d-balancer/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/d-balancer/crush-compat.yaml b/qa/suites/rados/thrash/d-balancer/crush-compat.yaml
new file mode 100644
index 00000000..aa867660
--- /dev/null
+++ b/qa/suites/rados/thrash/d-balancer/crush-compat.yaml
@@ -0,0 +1,6 @@
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph balancer mode crush-compat
+ - ceph balancer on
diff --git a/qa/suites/rados/thrash/d-balancer/off.yaml b/qa/suites/rados/thrash/d-balancer/off.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/d-balancer/off.yaml
diff --git a/qa/suites/rados/thrash/d-balancer/upmap.yaml b/qa/suites/rados/thrash/d-balancer/upmap.yaml
new file mode 100644
index 00000000..788eebee
--- /dev/null
+++ b/qa/suites/rados/thrash/d-balancer/upmap.yaml
@@ -0,0 +1,7 @@
+tasks:
+- exec:
+ mon.a:
+ - while ! ceph balancer status ; do sleep 1 ; done
+ - ceph osd set-require-min-compat-client luminous
+ - ceph balancer mode upmap
+ - ceph balancer on
diff --git a/qa/suites/rados/thrash/msgr b/qa/suites/rados/thrash/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr
@@ -0,0 +1 @@
+.qa/msgr
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/msgr-failures/.qa b/qa/suites/rados/thrash/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
new file mode 100644
index 00000000..02121726
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml
@@ -0,0 +1,8 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms tcp read timeout: 5
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/thrash/msgr-failures/few.yaml b/qa/suites/rados/thrash/msgr-failures/few.yaml
new file mode 100644
index 00000000..527eadb4
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/few.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ osd:
+ osd heartbeat use min delay socket: true
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
new file mode 100644
index 00000000..91c14725
--- /dev/null
+++ b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 2500
+ ms inject delay type: osd
+ ms inject delay probability: .005
+ ms inject delay max: 1
+ ms inject internal delays: .002
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/thrash/objectstore b/qa/suites/rados/thrash/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/thrash/objectstore
@@ -0,0 +1 @@
+.qa/objectstore
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/rados.yaml b/qa/suites/rados/thrash/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/thrash/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/supported-random-distro$ b/qa/suites/rados/thrash/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/thrash/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/thrashers/.qa b/qa/suites/rados/thrash/thrashers/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/thrash/thrashers/careful.yaml b/qa/suites/rados/thrash/thrashers/careful.yaml
new file mode 100644
index 00000000..85e0c268
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/careful.yaml
@@ -0,0 +1,26 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
+ aggressive_pg_num_changes: false
diff --git a/qa/suites/rados/thrash/thrashers/default.yaml b/qa/suites/rados/thrash/thrashers/default.yaml
new file mode 100644
index 00000000..536e85cb
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/default.yaml
@@ -0,0 +1,26 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd debug reject backfill probability: .3
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 3
+ osd snap trim sleep: 2
+ osd delete sleep: 1
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash/thrashers/mapgap.yaml b/qa/suites/rados/thrash/thrashers/mapgap.yaml
new file mode 100644
index 00000000..bbc3dbdc
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/mapgap.yaml
@@ -0,0 +1,27 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ - osd_map_cache_size
+ conf:
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+ osd:
+ osd map cache size: 1
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd scrub during recovery: false
+ osd max backfills: 6
+tasks:
+- thrashosds:
+ timeout: 1800
+ chance_pgnum_grow: 0.25
+ chance_pgnum_shrink: 0.25
+ chance_pgpnum_fix: 0.25
+ chance_test_map_discontinuity: 2
diff --git a/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/qa/suites/rados/thrash/thrashers/morepggrow.yaml
new file mode 100644
index 00000000..91d2173e
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/morepggrow.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ journal throttle high multiple: 2
+ journal throttle max multiple: 10
+ filestore queue throttle high multiple: 2
+ filestore queue throttle max multiple: 10
+ osd max backfills: 9
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+openstack:
+- volumes:
+ size: 50
diff --git a/qa/suites/rados/thrash/thrashers/none.yaml b/qa/suites/rados/thrash/thrashers/none.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/none.yaml
diff --git a/qa/suites/rados/thrash/thrashers/pggrow.yaml b/qa/suites/rados/thrash/thrashers/pggrow.yaml
new file mode 100644
index 00000000..8721fd18
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashers/pggrow.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+ conf:
+ osd:
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ filestore odsync write: true
+ osd max backfills: 2
+ osd snap trim sleep: .5
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 2
+ chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/thrash/thrashosds-health.yaml b/qa/suites/rados/thrash/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/thrash/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/thrash/workloads/.qa b/qa/suites/rados/thrash/workloads/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
new file mode 100644
index 00000000..8c9764ad
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ admin socket: /var/run/ceph/ceph-$name.asok
+tasks:
+- radosbench:
+ clients: [client.0]
+ time: 150
+- admin_socket:
+ client.0:
+ objecter_requests:
+ test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}"
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
new file mode 100644
index 00000000..31a964d1
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml
@@ -0,0 +1,36 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2
+ - sudo ceph osd pool create base 4 4 erasure myprofile
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool set base min_size 2
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 5000
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 10000
+ objects: 6600
+ max_seconds: 1200
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
new file mode 100644
index 00000000..f082b0b9
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml
@@ -0,0 +1,34 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+ - sudo ceph osd pool set cache min_write_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
new file mode 100644
index 00000000..b84d4d95
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml
@@ -0,0 +1,39 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache readproxy
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
new file mode 100644
index 00000000..8d712e86
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml
@@ -0,0 +1,44 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+openstack:
+ - machine:
+ ram: 15000 # MB
diff --git a/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
new file mode 100644
index 00000000..7ece997e
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache-snaps.yaml
@@ -0,0 +1,39 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache target_max_objects 250
+ - sudo ceph osd pool set cache min_read_recency_for_promote 2
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/qa/suites/rados/thrash/workloads/cache.yaml b/qa/suites/rados/thrash/workloads/cache.yaml
new file mode 100644
index 00000000..42cfa6cb
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/cache.yaml
@@ -0,0 +1,36 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - must scrub before tier agent can activate
+ conf:
+ osd:
+ # override short_pg_log_entries.yaml (which sets these under [global])
+ osd_min_pg_log_entries: 3000
+ osd_max_pg_log_entries: 3000
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 3600
+ - sudo ceph osd pool set cache min_read_recency_for_promote 0
+ - sudo ceph osd pool set cache min_write_recency_for_promote 0
+- rados:
+ clients: [client.0]
+ pools: [base]
+ ops: 4000
+ objects: 500
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
+ cache_flush: 50
+ cache_try_flush: 50
+ cache_evict: 50
diff --git a/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
new file mode 100644
index 00000000..1f0759d9
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd deep scrub update digest min age: 0
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ pool_snaps: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
new file mode 100644
index 00000000..7c23a5ca
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml
@@ -0,0 +1,20 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_AVAILABILITY\)
+ crush_tunables: jewel
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ mon:
+ mon warn on pool no app: false
+ debug mgrc: 20
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml b/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml
new file mode 100644
index 00000000..902c4b56
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml
@@ -0,0 +1,49 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ concurrency: 128
+ size: 8192
+ time: 90
diff --git a/qa/suites/rados/thrash/workloads/radosbench.yaml b/qa/suites/rados/thrash/workloads/radosbench.yaml
new file mode 100644
index 00000000..1b25004a
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/radosbench.yaml
@@ -0,0 +1,33 @@
+overrides:
+ ceph:
+ conf:
+ client.0:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
diff --git a/qa/suites/rados/thrash/workloads/redirect.yaml b/qa/suites/rados/thrash/workloads/redirect.yaml
new file mode 100644
index 00000000..bebce845
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/redirect.yaml
@@ -0,0 +1,15 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml b/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml
new file mode 100644
index 00000000..c2787c43
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml
@@ -0,0 +1,14 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ set_redirect: 100
+ read: 50
+ tier_promote: 30
diff --git a/qa/suites/rados/thrash/workloads/redirect_set_object.yaml b/qa/suites/rados/thrash/workloads/redirect_set_object.yaml
new file mode 100644
index 00000000..06ba60c7
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/redirect_set_object.yaml
@@ -0,0 +1,13 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 500
+ set_redirect: true
+ op_weights:
+ set_redirect: 100
+ copy_from: 100
diff --git a/qa/suites/rados/thrash/workloads/set-chunks-read.yaml b/qa/suites/rados/thrash/workloads/set-chunks-read.yaml
new file mode 100644
index 00000000..1abbdd75
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/set-chunks-read.yaml
@@ -0,0 +1,13 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create low_tier 4
+- rados:
+ clients: [client.0]
+ low_tier_pool: 'low_tier'
+ ops: 4000
+ objects: 300
+ set_chunk: true
+ op_weights:
+ chunk_read: 100
+ tier_promote: 10
diff --git a/qa/suites/rados/thrash/workloads/small-objects.yaml b/qa/suites/rados/thrash/workloads/small-objects.yaml
new file mode 100644
index 00000000..f5a18ae6
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/small-objects.yaml
@@ -0,0 +1,24 @@
+overrides:
+ ceph:
+ crush_tunables: jewel
+ conf:
+ mon:
+ mon osd initial require min compat client: jewel
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400000
+ max_seconds: 600
+ max_in_flight: 64
+ objects: 1024
+ size: 16384
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
new file mode 100644
index 00000000..aa82d973
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml
@@ -0,0 +1,13 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
diff --git a/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml b/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
new file mode 100644
index 00000000..606dcae6
--- /dev/null
+++ b/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml
@@ -0,0 +1,8 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_fadvise_dontneed: true
+ op_weights:
+ write: 100
diff --git a/qa/suites/rados/upgrade/.qa b/qa/suites/rados/upgrade/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/upgrade/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/upgrade/mimic-x-singleton b/qa/suites/rados/upgrade/mimic-x-singleton
new file mode 120000
index 00000000..ebecaf67
--- /dev/null
+++ b/qa/suites/rados/upgrade/mimic-x-singleton
@@ -0,0 +1 @@
+../../upgrade/mimic-x-singleton \ No newline at end of file
diff --git a/qa/suites/rados/verify/% b/qa/suites/rados/verify/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/verify/%
diff --git a/qa/suites/rados/verify/.qa b/qa/suites/rados/verify/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/ceph.yaml b/qa/suites/rados/verify/ceph.yaml
new file mode 100644
index 00000000..c0857e14
--- /dev/null
+++ b/qa/suites/rados/verify/ceph.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon min osdmap epochs: 50
+ paxos service trim min: 10
+ # prune full osdmaps regularly
+ mon osdmap full prune min: 15
+ mon osdmap full prune interval: 2
+ mon osdmap full prune txsize: 2
+tasks:
+- install:
+- ceph:
diff --git a/qa/suites/rados/verify/clusters/+ b/qa/suites/rados/verify/clusters/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/verify/clusters/+
diff --git a/qa/suites/rados/verify/clusters/.qa b/qa/suites/rados/verify/clusters/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/clusters/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/clusters/fixed-2.yaml b/qa/suites/rados/verify/clusters/fixed-2.yaml
new file mode 120000
index 00000000..230ff0fd
--- /dev/null
+++ b/qa/suites/rados/verify/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+.qa/clusters/fixed-2.yaml \ No newline at end of file
diff --git a/qa/suites/rados/verify/clusters/openstack.yaml b/qa/suites/rados/verify/clusters/openstack.yaml
new file mode 100644
index 00000000..e559d912
--- /dev/null
+++ b/qa/suites/rados/verify/clusters/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
diff --git a/qa/suites/rados/verify/d-thrash/.qa b/qa/suites/rados/verify/d-thrash/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/d-thrash/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/d-thrash/default/+ b/qa/suites/rados/verify/d-thrash/default/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/verify/d-thrash/default/+
diff --git a/qa/suites/rados/verify/d-thrash/default/.qa b/qa/suites/rados/verify/d-thrash/default/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/d-thrash/default/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/d-thrash/default/default.yaml b/qa/suites/rados/verify/d-thrash/default/default.yaml
new file mode 100644
index 00000000..8f2b2667
--- /dev/null
+++ b/qa/suites/rados/verify/d-thrash/default/default.yaml
@@ -0,0 +1,11 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - but it is still running
+ - objects unfound and apparently lost
+tasks:
+- thrashosds:
+ timeout: 1200
+ chance_pgnum_grow: 1
+ chance_pgnum_shrink: 1
+ chance_pgpnum_fix: 1
diff --git a/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml b/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/verify/d-thrash/none.yaml b/qa/suites/rados/verify/d-thrash/none.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/verify/d-thrash/none.yaml
diff --git a/qa/suites/rados/verify/msgr b/qa/suites/rados/verify/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/verify/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/verify/msgr-failures/.qa b/qa/suites/rados/verify/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/msgr-failures/few.yaml b/qa/suites/rados/verify/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/verify/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ log-whitelist:
+ - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/verify/objectstore b/qa/suites/rados/verify/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/verify/objectstore
@@ -0,0 +1 @@
+.qa/objectstore \ No newline at end of file
diff --git a/qa/suites/rados/verify/rados.yaml b/qa/suites/rados/verify/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/verify/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/verify/tasks/.qa b/qa/suites/rados/verify/tasks/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/tasks/mon_recovery.yaml b/qa/suites/rados/verify/tasks/mon_recovery.yaml
new file mode 100644
index 00000000..266a4e47
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/mon_recovery.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(SMALLER_PGP_NUM\)
+ - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- mon_recovery:
diff --git a/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/qa/suites/rados/verify/tasks/rados_api_tests.yaml
new file mode 100644
index 00000000..79f24479
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/rados_api_tests.yaml
@@ -0,0 +1,30 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - reached quota
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(SLOW_OPS\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_AVAILABILITY\)
+ - \(OBJECT_MISPLACED\)
+ - slow request
+ conf:
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ debug monc: 20
+ mon:
+ mon warn on pool no app: false
+tasks:
+- workunit:
+ timeout: 6h
+ env:
+ ALLOW_TIMEOUTS: "1"
+ clients:
+ client.0:
+ - rados/test.sh
diff --git a/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/qa/suites/rados/verify/tasks/rados_cls_all.yaml
new file mode 100644
index 00000000..bcc58e19
--- /dev/null
+++ b/qa/suites/rados/verify/tasks/rados_cls_all.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ osd:
+ osd_class_load_list: "cephfs hello journal lock log numops rbd refcount
+ rgw sdk timeindex user version"
+ osd_class_default_list: "cephfs hello journal lock log numops rbd refcount
+ rgw sdk timeindex user version"
+tasks:
+- workunit:
+ clients:
+ client.0:
+ - cls
diff --git a/qa/suites/rados/verify/validater/.qa b/qa/suites/rados/verify/validater/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/verify/validater/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/verify/validater/lockdep.yaml b/qa/suites/rados/verify/validater/lockdep.yaml
new file mode 100644
index 00000000..25f84355
--- /dev/null
+++ b/qa/suites/rados/verify/validater/lockdep.yaml
@@ -0,0 +1,5 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ lockdep: true
diff --git a/qa/suites/rados/verify/validater/valgrind.yaml b/qa/suites/rados/verify/validater/valgrind.yaml
new file mode 100644
index 00000000..5ac297cc
--- /dev/null
+++ b/qa/suites/rados/verify/validater/valgrind.yaml
@@ -0,0 +1,32 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+os_version: '7.8'
+
+overrides:
+ install:
+ ceph:
+ debuginfo: true
+ ceph:
+ conf:
+ global:
+ osd heartbeat grace: 80
+ mon:
+ mon osd crush smoke test: false
+ osd:
+ osd fast shutdown: false
+ debug bluestore: 1
+ debug bluefs: 1
+ log-whitelist:
+ - overall HEALTH_
+# valgrind is slow; we might get PGs stuck peering, etc.
+ - \(PG_
+# mons are sometimes left out of the initial quorum due to valgrind slowness. OK to whitelist here because we'll still catch an actual crash via the core.
+ - \(MON_DOWN\)
+ - \(SLOW_OPS\)
+ - slow request
+ valgrind:
+ mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+ osd: [--tool=memcheck]
+ mds: [--tool=memcheck]
+# https://tracker.ceph.com/issues/38621
+# mgr: [--tool=memcheck]