summaryrefslogtreecommitdiffstats
path: root/qa/suites/rados/singleton
diff options
context:
space:
mode:
Diffstat (limited to 'qa/suites/rados/singleton')
-rw-r--r--qa/suites/rados/singleton/%0
l---------qa/suites/rados/singleton/.qa1
l---------qa/suites/rados/singleton/all/.qa1
-rw-r--r--qa/suites/rados/singleton/all/admin-socket.yaml26
-rw-r--r--qa/suites/rados/singleton/all/backfill-toofull.yaml37
-rw-r--r--qa/suites/rados/singleton/all/deduptool.yaml28
-rw-r--r--qa/suites/rados/singleton/all/divergent_priors.yaml28
-rw-r--r--qa/suites/rados/singleton/all/divergent_priors2.yaml28
-rw-r--r--qa/suites/rados/singleton/all/dump-stuck.yaml21
-rw-r--r--qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml36
-rw-r--r--qa/suites/rados/singleton/all/ec-lost-unfound.yaml29
-rw-r--r--qa/suites/rados/singleton/all/erasure-code-nonregression.yaml17
-rw-r--r--qa/suites/rados/singleton/all/lost-unfound-delete.yaml27
-rw-r--r--qa/suites/rados/singleton/all/lost-unfound.yaml27
-rw-r--r--qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml29
-rw-r--r--qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml34
-rw-r--r--qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml34
-rw-r--r--qa/suites/rados/singleton/all/mon-auth-caps.yaml19
-rw-r--r--qa/suites/rados/singleton/all/mon-config-key-caps.yaml19
-rw-r--r--qa/suites/rados/singleton/all/mon-config-keys.yaml22
-rw-r--r--qa/suites/rados/singleton/all/mon-config.yaml22
-rw-r--r--qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled154
-rw-r--r--qa/suites/rados/singleton/all/osd-backfill.yaml28
-rw-r--r--qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml30
-rw-r--r--qa/suites/rados/singleton/all/osd-recovery.yaml32
-rw-r--r--qa/suites/rados/singleton/all/peer.yaml27
-rw-r--r--qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml44
-rw-r--r--qa/suites/rados/singleton/all/pg-autoscaler.yaml36
-rw-r--r--qa/suites/rados/singleton/all/pg-removal-interruption.yaml36
-rw-r--r--qa/suites/rados/singleton/all/radostool.yaml28
-rw-r--r--qa/suites/rados/singleton/all/random-eio.yaml46
-rw-r--r--qa/suites/rados/singleton/all/rebuild-mondb.yaml37
-rw-r--r--qa/suites/rados/singleton/all/recovery-preemption.yaml60
-rw-r--r--qa/suites/rados/singleton/all/resolve_stuck_peering.yaml19
-rw-r--r--qa/suites/rados/singleton/all/test-crash.yaml20
-rw-r--r--qa/suites/rados/singleton/all/test-noautoscale-flag.yaml39
-rw-r--r--qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml22
-rw-r--r--qa/suites/rados/singleton/all/thrash-backfill-full.yaml52
-rw-r--r--qa/suites/rados/singleton/all/thrash-eio.yaml49
-rw-r--r--qa/suites/rados/singleton/all/thrash-rados/+0
l---------qa/suites/rados/singleton/all/thrash-rados/.qa1
-rw-r--r--qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml27
l---------qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml1
-rw-r--r--qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml70
-rw-r--r--qa/suites/rados/singleton/all/watch-notify-same-primary.yaml34
l---------qa/suites/rados/singleton/mon_election1
l---------qa/suites/rados/singleton/msgr1
l---------qa/suites/rados/singleton/msgr-failures/.qa1
-rw-r--r--qa/suites/rados/singleton/msgr-failures/few.yaml9
-rw-r--r--qa/suites/rados/singleton/msgr-failures/many.yaml13
-rw-r--r--qa/suites/rados/singleton/msgr-failures/none.yaml0
l---------qa/suites/rados/singleton/objectstore1
l---------qa/suites/rados/singleton/rados.yaml1
l---------qa/suites/rados/singleton/supported-random-distro$1
54 files changed, 1405 insertions, 0 deletions
diff --git a/qa/suites/rados/singleton/% b/qa/suites/rados/singleton/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rados/singleton/%
diff --git a/qa/suites/rados/singleton/.qa b/qa/suites/rados/singleton/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/.qa b/qa/suites/rados/singleton/all/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton/all/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/admin-socket.yaml b/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 000000000..76bee411c
--- /dev/null
+++ b/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - client.a
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+ osd.0:
+ version:
+ git_version:
+ help:
+ config show:
+ config help:
+ config set bluestore_csum_type xxhash64:
+ perf dump:
+ perf schema:
+ get_heap_property tcmalloc.max_total_thread_cache_byte:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+ set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/qa/suites/rados/singleton/all/backfill-toofull.yaml b/qa/suites/rados/singleton/all/backfill-toofull.yaml
new file mode 100644
index 000000000..fcc3d0e29
--- /dev/null
+++ b/qa/suites/rados/singleton/all/backfill-toofull.yaml
@@ -0,0 +1,37 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - Error
+ - overall HEALTH_
+ - \(OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_BACKFILLFULL\)
+ - \(POOL_NEARFULL\)
+ - \(SLOW_OPS\)
+ - \(TOO_FEW_PGS\)
+ - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running
+ - slow request
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd max pg log entries: 5
+- backfill_toofull:
diff --git a/qa/suites/rados/singleton/all/deduptool.yaml b/qa/suites/rados/singleton/all/deduptool.yaml
new file mode 100644
index 000000000..616a0b33c
--- /dev/null
+++ b/qa/suites/rados/singleton/all/deduptool.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_dedup_tool.sh
diff --git a/qa/suites/rados/singleton/all/divergent_priors.yaml b/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 000000000..24b42557f
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- divergent_priors:
diff --git a/qa/suites/rados/singleton/all/divergent_priors2.yaml b/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 000000000..6bef63958
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+
+overrides:
+ ceph:
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- divergent_priors2:
diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 000000000..c1d28ee8e
--- /dev/null
+++ b/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,21 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- dump_stuck:
diff --git a/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml b/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml
new file mode 100644
index 000000000..d71eab149
--- /dev/null
+++ b/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml
@@ -0,0 +1,36 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - \(OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SLOW_OPS\)
+ - deep-scrub
+ - missing
+ - overall HEALTH_
+ - repair
+ - slow request
+ - unfound
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd max pg log entries: 5
+- ec_inconsistent_hinfo:
diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 000000000..9c423c8d8
--- /dev/null
+++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - slow request
+- ec_lost_unfound:
diff --git a/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
new file mode 100644
index 000000000..e8201ee0b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- workunit:
+ clients:
+ all:
+ - erasure-code/encode-decode-non-regression.sh
diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 000000000..bb170b506
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - slow request
+- rep_lost_unfound_delete:
diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 000000000..fceee20c0
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - slow request
+- lost_unfound:
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644
index 000000000..e5999bc9b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -0,0 +1,29 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 2
+ osd max pg per osd hard ratio : 1
+ log-ignorelist:
+ - \(TOO_FEW_PGS\)
+ - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: True
+ pg_num: 2
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644
index 000000000..075d6be1f
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 1
+ osd max pg per osd hard ratio : 1
+ log-ignorelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+ - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: True
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644
index 000000000..db2856484
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: False
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ conf:
+ mon:
+ osd pool default size: 2
+ osd:
+ mon max pg per osd : 1
+ osd max pg per osd hard ratio : 1
+ log-ignorelist:
+ - \(TOO_FEW_PGS\)
+ - \(PG_
+ - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+ test_create_from_mon: False
+ pg_num: 1
+ pool_size: 2
+ from_primary: False
diff --git a/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
new file mode 100644
index 000000000..8c23c0bc9
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - overall HEALTH_
+ - \(AUTH_BAD_CAPS\)
+- workunit:
+ clients:
+ all:
+ - mon/auth_caps.sh
diff --git a/qa/suites/rados/singleton/all/mon-config-key-caps.yaml b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
new file mode 100644
index 000000000..f987f3c98
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - overall HEALTH_
+ - \(AUTH_BAD_CAPS\)
+- workunit:
+ clients:
+ all:
+ - mon/test_config_key_caps.sh
diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 000000000..7d8b920cb
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,22 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- workunit:
+ clients:
+ all:
+ - mon/test_mon_config_key.py
diff --git a/qa/suites/rados/singleton/all/mon-config.yaml b/qa/suites/rados/singleton/all/mon-config.yaml
new file mode 100644
index 000000000..3627e17df
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config.yaml
@@ -0,0 +1,22 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- workunit:
+ clients:
+ all:
+ - mon/config.sh
diff --git a/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
new file mode 100644
index 000000000..120e073a7
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
@@ -0,0 +1,154 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+ - osd.8
+ - osd.9
+ - osd.10
+ - osd.11
+ - osd.12
+ - osd.13
+ - osd.14
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 1 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon memory target: 134217728 # reduced to 128_M
+ rocksdb cache size: 67108864 # reduced to 64_M
+ mon osd cache size: 100000
+ mon osd cache size min: 134217728
+ osd:
+ osd memory target: 1610612736 # reduced to 1.5_G
+ osd objectstore: bluestore
+ debug bluestore: 20
+ osd scrub min interval: 60
+ osd scrub max interval: 120
+ osd max backfills: 9
+
+tasks:
+- install:
+ branch: wip-sseshasa2-testing-2019-07-30-1825 # change as appropriate
+- ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+- interactive:
+- parallel:
+ - log-mon-rss
+ - stress-tasks
+ - benchload
+- exec:
+ client.0:
+ - "ceph_test_mon_memory_target 134217728" # mon memory target
+ - "ceph_test_mon_rss_usage 134217728"
+log-mon-rss:
+- background_exec:
+ client.0:
+ - while true
+ - do /usr/bin/ceph_test_log_rss_usage ceph-mon >> /var/log/ceph/ceph-mon-rss-usage.log
+ - sleep 300 # log rss usage every 5 mins. May be modified accordingly
+ - done
+- exec:
+ client.0:
+ - sleep 37860 # sum total of the radosbench test times below plus 60 secs
+benchload: # The total radosbench test below translates to 10.5 hrs
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+ - radosbench:
+ clients: [client.0]
+ time: 1800
+stress-tasks:
+- thrashosds:
+ op_delay: 1
+ bdev_inject_crash: 1
+ bdev_inject_crash_probability: .8
+ chance_down: 80
+ chance_pgnum_grow: 3
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 3
+ chance_thrash_pg_upmap_items: 3
+ min_in: 2
diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 000000000..bbbd9b4b3
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+- osd_backfill:
diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 000000000..15a0ea342
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,30 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 000000000..75cea6a94
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(SLOW_OPS\)
+ - slow request
+ conf:
+ osd:
+ osd min pg log entries: 5
+ osd pg log trim min: 0
+ osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 000000000..24fd74b82
--- /dev/null
+++ b/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ config:
+ global:
+ osd pool default min size : 1
+ log-ignorelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- peer:
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
new file mode 100644
index 000000000..042c3d78e
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+- - mon.b
+ - mon.c
+ - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+- exec:
+ client.0:
+ - ceph progress off
+
+- workunit:
+ clients:
+ all:
+ - mon/pg_autoscaler.sh
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
new file mode 100644
index 000000000..c2ab618ca
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
@@ -0,0 +1,36 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+- workunit:
+ clients:
+ all:
+ - mon/pg_autoscaler.sh
diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 000000000..b3f11264f
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,36 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+- exec:
+ client.0:
+ - sudo ceph osd pool create foo 128 128
+ - sudo ceph osd pool application enable foo rados
+ - sleep 5
+ - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+ - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+ client.0:
+ - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 000000000..fa3a1b0f7
--- /dev/null
+++ b/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 2
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - had wrong client addr
+ - had wrong cluster addr
+ - reached quota
+ - overall HEALTH_
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_rados_tool.sh
diff --git a/qa/suites/rados/singleton/all/random-eio.yaml b/qa/suites/rados/singleton/all/random-eio.yaml
new file mode 100644
index 000000000..782b906d6
--- /dev/null
+++ b/qa/suites/rados/singleton/all/random-eio.yaml
@@ -0,0 +1,46 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - had a read error
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(PG_DEGRADED\)
+ - \(OSD_TOO_MANY_REPAIRS\)
+- full_sequential:
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+ - sudo ceph osd pool create test 16 16
+ - sudo ceph osd pool set test size 3
+ - sudo ceph pg dump pgs --format=json-pretty
+ - radosbench:
+ clients: [client.0]
+ time: 360
+ type: rand
+ objectsize: 1048576
+ pool: test
+ create_pool: false
+ - exec:
+ client.0:
+ - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+ - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
new file mode 100644
index 000000000..3f1c74831
--- /dev/null
+++ b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -0,0 +1,37 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - no reply from
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ conf:
+ mon:
+ debug auth: 30
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 30
+ - rebuild_mondb:
+ - radosbench:
+ clients: [client.0]
+ time: 30
diff --git a/qa/suites/rados/singleton/all/recovery-preemption.yaml b/qa/suites/rados/singleton/all/recovery-preemption.yaml
new file mode 100644
index 000000000..7438f9e77
--- /dev/null
+++ b/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -0,0 +1,60 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 20 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ conf:
+ osd:
+ osd recovery sleep: .1
+ osd min pg log entries: 10
+ osd max pg log entries: 1000
+ osd_target_pg_log_entries_per_osd: 0
+ osd pg log trim min: 10
+ log-ignorelist:
+ - \(POOL_APP_NOT_ENABLED\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(PG_
+ - \(SLOW_OPS\)
+ - overall HEALTH
+ - slow request
+- exec:
+ osd.0:
+ - ceph osd pool create foo 128
+ - ceph osd pool application enable foo foo
+ - sleep 5
+- ceph.healthy:
+- exec:
+ osd.0:
+ - rados -p foo bench 30 write -b 4096 --no-cleanup
+ - ceph osd out 0
+ - sleep 5
+ - ceph osd set noup
+- ceph.restart:
+ daemons: [osd.1]
+ wait-for-up: false
+ wait-for-healthy: false
+- exec:
+ osd.0:
+ - rados -p foo bench 3 write -b 4096 --no-cleanup
+ - ceph osd unset noup
+ - sleep 10
+ - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
+- ceph.healthy:
+- exec:
+ osd.0:
+ - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
diff --git a/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
new file mode 100644
index 000000000..2756ebe82
--- /dev/null
+++ b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -0,0 +1,19 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ fs: xfs
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+ - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/qa/suites/rados/singleton/all/test-crash.yaml b/qa/suites/rados/singleton/all/test-crash.yaml
new file mode 100644
index 000000000..beb83f0bb
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test-crash.yaml
@@ -0,0 +1,20 @@
+roles:
+ - [client.0, mon.a, mgr.x, osd.0, osd.1, osd.2]
+
+tasks:
+ - install:
+ - ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - Reduced data availability
+ - OSD_.*DOWN
+ - \(RECENT_CRASH\)
+ - workunit:
+ clients:
+ client.0:
+ - rados/test_crash.sh
+ - ceph.restart: [osd.*]
+ - exec:
+ mon.a:
+ - find $TESTDIR/archive/coredump -type f -exec rm -f {} \;
diff --git a/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml b/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml
new file mode 100644
index 000000000..23caa745d
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test-noautoscale-flag.yaml
@@ -0,0 +1,39 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 4
+ size: 10 # GB
+overrides:
+ ceph:
+ create_rbd_pool: false
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ conf:
+ mon:
+      osd pool default pg autoscale mode: 'on'
+ log-ignorelist:
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request
+tasks:
+- install:
+- ceph:
+- workunit:
+ clients:
+ all:
+ - mon/test_noautoscale_flag.sh
diff --git a/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
new file mode 100644
index 000000000..a76f6a8f0
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -0,0 +1,22 @@
+overrides:
+ ceph:
+ fs: ext4
+ conf:
+ global:
+ osd max object name len: 460
+ osd max object namespace len: 64
+ osd client message cap: 5000
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - overall HEALTH_
+ - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+ clients:
+ all:
+ - rados/test_envlibrados_for_rocksdb.sh
diff --git a/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
new file mode 100644
index 000000000..0f2924db3
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
@@ -0,0 +1,52 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ osd default pool size: 3
+ osd min pg log entries: 5
+ osd max pg log entries: 10
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(SLOW_OPS\)
+ - \(PG_
+ - \(OBJECT_MISPLACED\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(TOO_FEW_PGS\)
+ - \(POOL_BACKFILLFULL\)
+ - slow request
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .75
+ min_live: 5
+ min_in: 5
+ chance_test_backfill_full: .5
+- radosbench:
+ clients: [client.0]
+ time: 1800
+ type: rand
+ objectsize: 1048576
diff --git a/qa/suites/rados/singleton/all/thrash-eio.yaml b/qa/suites/rados/singleton/all/thrash-eio.yaml
new file mode 100644
index 000000000..5d9770061
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -0,0 +1,49 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+overrides:
+ ceph:
+ conf:
+ mon:
+ osd default pool size: 3
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - missing primary copy of
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(SLOW_OPS\)
+ - \(PG_
+ - \(OBJECT_MISPLACED\)
+ - \(OSD_
+ - \(OBJECT_
+ - \(TOO_FEW_PGS\)
+ - slow request
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+ random_eio: .33
+ min_live: 5
+ min_in: 5
+- radosbench:
+ clients: [client.0]
+ time: 720
+ type: rand
+ objectsize: 1048576
diff --git a/qa/suites/rados/singleton/all/thrash-rados/+ b/qa/suites/rados/singleton/all/thrash-rados/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/+
diff --git a/qa/suites/rados/singleton/all/thrash-rados/.qa b/qa/suites/rados/singleton/all/thrash-rados/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644
index 000000000..1caef6db5
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ log-ignorelist:
+ - but it is still running
+- thrashosds:
+ op_delay: 30
+ clean_interval: 120
+ chance_down: .5
+- workunit:
+ clients:
+ all:
+ - rados/load-gen-mix-small.sh
diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000
index 000000000..9124eb1aa
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 000000000..ab210abd7
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,70 @@
+roles:
+- - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+- - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+ log-ignorelist:
+ - but it is still running
+ - slow request
+ - overall HEALTH_
+ - \(CACHE_POOL_
+- exec:
+ client.0:
+ - sudo ceph osd pool create base 4
+ - sudo ceph osd pool application enable base rados
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add base cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay base cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+ mon.a:
+ - while true
+ - do sleep 30
+ - sudo ceph osd pool set cache cache_target_full_ratio .001
+ - echo cache-try-flush-evict-all
+ - rados -p cache cache-try-flush-evict-all
+ - sleep 5
+ - echo cache-flush-evict-all
+ - rados -p cache cache-flush-evict-all
+ - sleep 5
+ - echo remove overlay
+ - sudo ceph osd tier remove-overlay base
+ - sleep 20
+ # Disabled due to https://tracker.ceph.com/issues/46323
+ #- echo add writeback overlay
+ #- sudo ceph osd tier cache-mode cache writeback
+ #- sudo ceph osd pool set cache cache_target_full_ratio .8
+ #- sudo ceph osd tier set-overlay base cache
+ #- sleep 30
+ #- sudo ceph osd tier cache-mode cache readproxy
+ - done
+- rados:
+ clients: [client.0]
+ pools: [base]
+ max_seconds: 600
+ ops: 400000
+ objects: 10000
+ size: 1024
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ copy_from: 50
diff --git a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 000000000..eeb585c88
--- /dev/null
+++ b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - client.0
+openstack:
+ - volumes: # attached to each instance
+ count: 3
+ size: 10 # GB
+tasks:
+- install:
+- ceph:
+ pre-mgr-commands:
+ - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    conf:
+      global:
+        osd pool default min size: 1
+ client:
+ debug ms: 1
+ debug objecter: 20
+ debug rados: 20
+ log-ignorelist:
+ - objects unfound and apparently lost
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+ clients: [client.0]
diff --git a/qa/suites/rados/singleton/mon_election b/qa/suites/rados/singleton/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/rados/singleton/mon_election
@@ -0,0 +1 @@
+.qa/mon_election \ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr b/qa/suites/rados/singleton/msgr
new file mode 120000
index 000000000..57bee80db
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr
@@ -0,0 +1 @@
+.qa/msgr \ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr-failures/.qa b/qa/suites/rados/singleton/msgr-failures/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/ \ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr-failures/few.yaml b/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 000000000..8fd638744
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,9 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 5000
+ mon client directed command retry: 5
+ log-ignorelist:
+ - \(OSD_SLOW_PING_TIME
+ - \(MON_DOWN\)
diff --git a/qa/suites/rados/singleton/msgr-failures/many.yaml b/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 000000000..206da3ec1
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,13 @@
+overrides:
+ ceph:
+ conf:
+ global:
+ ms inject socket failures: 1000
+ mon mgr beacon grace: 90
+ mon client hunt interval max multiple: 2
+ mon client directed command retry: 5
+ mgr:
+ debug monc: 10
+ log-ignorelist:
+ - \(OSD_SLOW_PING_TIME
+ - \(MON_DOWN\)
diff --git a/qa/suites/rados/singleton/msgr-failures/none.yaml b/qa/suites/rados/singleton/msgr-failures/none.yaml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/none.yaml
diff --git a/qa/suites/rados/singleton/objectstore b/qa/suites/rados/singleton/objectstore
new file mode 120000
index 000000000..848c65f9e
--- /dev/null
+++ b/qa/suites/rados/singleton/objectstore
@@ -0,0 +1 @@
+.qa/objectstore_debug \ No newline at end of file
diff --git a/qa/suites/rados/singleton/rados.yaml b/qa/suites/rados/singleton/rados.yaml
new file mode 120000
index 000000000..d256979c0
--- /dev/null
+++ b/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml \ No newline at end of file
diff --git a/qa/suites/rados/singleton/supported-random-distro$ b/qa/suites/rados/singleton/supported-random-distro$
new file mode 120000
index 000000000..7cef21eef
--- /dev/null
+++ b/qa/suites/rados/singleton/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$ \ No newline at end of file