Diffstat (limited to 'qa/suites/rados/singleton-nomsgr/all')
 l---------  qa/suites/rados/singleton-nomsgr/all/.qa                             |  1
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml        | 28
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/balancer.yaml                   | 15
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml             | 55
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml          | 25
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml             | 12
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/crushdiff.yaml                  | 24
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml         | 41
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml               | 41
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml            | 23
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml | 30
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml     | 18
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml       | 24
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/msgr.yaml                       | 23
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml      | 51
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml            | 29
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/pool-access.yaml                | 17
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml     | 60
 -rw-r--r--  qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml      | 15
 19 files changed, 532 insertions(+), 0 deletions(-)
diff --git a/qa/suites/rados/singleton-nomsgr/all/.qa b/qa/suites/rados/singleton-nomsgr/all/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 000000000..341a559f3
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,28 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - MDS in read-only mode
+      - force file system read-only
+      - overall HEALTH_
+      - \(FS_DEGRADED\)
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_FULL\)
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+- rgw:
+    - client.0
+- exec:
+    client.0:
+      - ceph_test_admin_socket_output --all
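
Note: ceph_test_admin_socket_output checks the output of admin-socket commands
across the cluster's daemons, which is why an mds and an rgw are spun up
alongside the mon and OSDs. A rough manual equivalent, sketched here as an
exec step that is not part of this suite, might be:

- exec:
    client.0:
      - sudo ceph daemon mon.a sessions
      - sudo ceph daemon osd.0 config show
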
diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
new file mode 100644
index 000000000..c42c5539d
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
@@ -0,0 +1,15 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    fs: xfs
+    log-ignorelist:
+      - \(PG_AVAILABILITY\)
+      - \(POOL_APP_NOT_ENABLED\)
+- cram:
+    clients:
+      client.0:
+        - src/test/cli-integration/balancer/misplaced.t
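
Note: the cram task replays shell-transcript tests. In a .t file, lines
beginning with "  $ " are commands and the indented lines that follow are the
expected output. A minimal illustrative transcript (hypothetical, not the
misplaced.t referenced above):

  $ ceph osd pool create foo 64
  pool 'foo' created
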
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 000000000..fddbd0723
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,55 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(POOL_APP_NOT_ENABLED\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+        debug client: 20
+        debug mds: 20
+        debug ms: 1
+- exec:
+    client.0:
+      - ceph osd pool create data_cache 4
+      - ceph osd tier add cephfs_data data_cache
+      - ceph osd tier cache-mode data_cache writeback
+      - ceph osd tier set-overlay cephfs_data data_cache
+      - ceph osd pool set data_cache hit_set_type bloom
+      - ceph osd pool set data_cache hit_set_count 8
+      - ceph osd pool set data_cache hit_set_period 3600
+      - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+    client.0:
+      - sudo chmod 777 $TESTDIR/mnt.0/
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - truncate --size 0 $TESTDIR/mnt.0/foo
+      - ls -al $TESTDIR/mnt.0/foo
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - cp $TESTDIR/mnt.0/foo /tmp/foo
+      - sync
+      - rados -p data_cache ls -
+      - sleep 10
+      - rados -p data_cache ls -
+      - rados -p data_cache cache-flush-evict-all
+      - rados -p data_cache ls -
+      - sleep 1
+- exec:
+    client.1:
+      - hexdump -C /tmp/foo | head
+      - hexdump -C $TESTDIR/mnt.1/foo | head
+      - cmp $TESTDIR/mnt.1/foo /tmp/foo
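
Note: this fragment leaves the writeback tier attached to cephfs_data. A
teardown, sketched with the same CLI but not part of this test, would look
like:

- exec:
    client.0:
      - rados -p data_cache cache-flush-evict-all
      - ceph osd tier remove-overlay cephfs_data
      - ceph osd tier remove cephfs_data data_cache
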
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
new file mode 100644
index 000000000..6a8faa4a8
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
@@ -0,0 +1,25 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+        - cephtool/test_kvstore_tool.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 000000000..530dc42a7
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,12 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - post-file.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/crushdiff.yaml b/qa/suites/rados/singleton-nomsgr/all/crushdiff.yaml
new file mode 100644
index 000000000..1639f0ed5
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/crushdiff.yaml
@@ -0,0 +1,24 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, client.0]
+
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(PG_DEGRADED\)
+
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_crushdiff.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 000000000..b4ce5468a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,41 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(POOL_APP_NOT_ENABLED\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create base-pool 4
+      - ceph osd pool application enable base-pool rados
+      - ceph osd pool create cache-pool 4
+      - ceph osd tier add base-pool cache-pool
+      - ceph osd tier cache-mode cache-pool writeback
+      - ceph osd tier set-overlay base-pool cache-pool
+      - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+      - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+      - rbd snap create base-pool/bar@snap
+      - rados -p base-pool cache-flush-evict-all
+      - rbd export base-pool/bar $TESTDIR/bar
+      - rbd export base-pool/bar@snap $TESTDIR/snap
+      - cmp $TESTDIR/foo $TESTDIR/bar
+      - cmp $TESTDIR/foo $TESTDIR/snap
+      - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
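
Note: the assertion here is that rbd export still returns the full image and
snapshot after cache-flush-evict-all has pushed everything down to base-pool,
i.e. reads promote correctly through the now-empty tier. An extra check that
the cache pool really drained, as an illustrative sketch, could be:

- exec:
    client.0:
      - rados -p cache-pool ls
      - rados df
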
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 000000000..a06221449
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,41 @@
+# verify #13098 fix
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - is full
+      - overall HEALTH_
+      - \(POOL_FULL\)
+      - \(POOL_NEAR_FULL\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(CACHE_POOL_NEAR_FULL\)
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create ec-ca 1 1
+      - ceph osd pool create ec 1 1 erasure default
+      - ceph osd pool application enable ec rados
+      - ceph osd tier add ec ec-ca
+      - ceph osd tier cache-mode ec-ca readproxy
+      - ceph osd tier set-overlay ec ec-ca
+      - ceph osd pool set ec-ca hit_set_type bloom
+      - ceph osd pool set-quota ec-ca max_bytes 20480000
+      - ceph osd pool set-quota ec max_bytes 20480000
+      - ceph osd pool set ec-ca target_max_bytes 20480000
+      - timeout 30 rados -p ec-ca bench 30 write || true
+      - ceph osd pool set-quota ec-ca max_bytes 0
+      - ceph osd pool set-quota ec max_bytes 0
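
Note: the quotas and target_max_bytes drive the readproxy tier to full while
rados bench writes through it; the "timeout 30 ... || true" guard presumably
tolerates the bench stalling on the full pool, which is the behavior the
referenced fix addresses. To inspect the quota state at that point, an
illustrative extra step (not in the fragment) would be:

- exec:
    client.0:
      - ceph osd pool get-quota ec-ca
      - ceph df
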
diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 000000000..5ed655324
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,23 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    conf:
+      osd:
+# we may land on ext4
+        osd max object name len: 400
+        osd max object namespace len: 64
+    log-ignorelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+        - rados/test_health_warnings.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
new file mode 100644
index 000000000..e1e9d34ef
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
@@ -0,0 +1,30 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_APP_NOT_ENABLED\)
+      - large omap objects
+      - Large omap object found
+      - application not enabled
+    conf:
+      osd:
+        osd scrub backoff ratio: 0
+        osd deep scrub large omap object value sum threshold: 8800000
+        osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_large_omap_detection.py
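
Note: the two "osd deep scrub large omap object ... threshold" settings lower
the limits so the workunit can trip the LARGE_OMAP_OBJECTS warning quickly.
For a manual reproduction, a sketch that writes more omap keys than the
20000-key threshold (hypothetical pool and object names) would be:

- exec:
    client.0:
      - ceph osd pool create omap-test 8
      - ceph osd pool application enable omap-test rados
      - for i in $(seq 1 20001); do rados -p omap-test setomapval bigobj key$i v; done
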
diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
new file mode 100644
index 000000000..61c2fa663
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
@@ -0,0 +1,18 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+- exec:
+    client.0:
+      - ceph_test_lazy_omap_stats
diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
new file mode 100644
index 000000000..0c0a071e9
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
@@ -0,0 +1,24 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+    extra_packages:
+      deb:
+        - libradosstriper-dev
+        - librados-dev
+        - libradospp-dev
+      rpm:
+        - libradosstriper-devel
+        - librados-devel
+        - libradospp-devel
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+- workunit:
+    clients:
+      all:
+        - rados/test_librados_build.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 000000000..4eb376fcf
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,23 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+      - ceph_test_async_driver
+      - ceph_test_msgr
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 15000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 0
+      size: 1 # GB
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    conf:
+      client:
+        debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 000000000..15952b989
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,51 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - osd.3
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - overall HEALTH_
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+      - \(POOL_APP_NOT_ENABLED\)
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd min pg log entries: 25
+        osd max pg log entries: 100
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 64
+      - sudo ceph osd pool application enable foo rados
+      - rados -p foo bench 60 write -b 1024 --no-cleanup
+      - sudo ceph osd pool set foo size 3
+      - sudo ceph osd out 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd in 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd pool set foo size 2
+- sleep:
+    duration: 300
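
Note: "osd debug reject backfill probability: .3" makes OSDs randomly refuse
backfill reservations, so the out/in and pool-size changes above force
backfill to be rejected and retried repeatedly. During the sleep windows,
progress could be watched with an illustrative step such as:

- exec:
    client.0:
      - ceph -s
      - ceph pg dump pgs_brief
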
diff --git a/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml b/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
new file mode 100644
index 000000000..5beb2015f
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
@@ -0,0 +1,29 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(OSD_DOWN\)
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(SLOW_OPS\)
+      - \(PG_AVAILABILITY\)
+      - \(PG_DEGRADED\)
+      - application not enabled
+      - slow request
+    conf:
+      osd:
+        osd scrub backoff ratio: 0
+        osd deep scrub large omap object value sum threshold: 8800000
+        osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+- exec:
+    client.0:
+      - ceph_test_osd_stale_read
diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 000000000..26d548430
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,17 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+        - rados/test_pool_access.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
new file mode 100644
index 000000000..39788ddd2
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
@@ -0,0 +1,60 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 20 # GB
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+    fs: xfs
+    conf:
+      osd:
+        osd recovery sleep: .1
+        osd objectstore: bluestore
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(OBJECT_
+      - \(PG_
+      - overall HEALTH
+- exec:
+    osd.0:
+      - ceph osd pool create foo 32
+      - ceph osd pool application enable foo foo
+      - rados -p foo bench 30 write -b 4096 --no-cleanup
+      - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.0]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - sleep 5
+      - rados -p foo bench 3 write -b 4096 --no-cleanup
+      - ceph osd unset noup
+      - sleep 10
+      - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.1]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - ceph osd out 0
+      - sleep 10
+      - ceph osd unset noup
+- ceph.healthy:
+    wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced
+- exec:
+    osd.0:
+      - ceph osd in 0
+- ceph.healthy:
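
Note: the noup/restart choreography creates a window in which the only OSD
holding the newest writes is down, so PGs report unfound objects; the test
then verifies they are found again once that OSD rejoins (hence the name).
While in the unfound state, an illustrative inspection step would be:

- exec:
    osd.0:
      - ceph health detail
      - ceph pg dump_stuck unclean
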
diff --git a/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml b/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
new file mode 100644
index 000000000..daeeeef4e
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
@@ -0,0 +1,15 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr_pool false --force
+- workunit:
+    clients:
+      all:
+        - rados/version_number_sanity.sh