From 19fcec84d8d7d21e796c7624e521b60d28ee21ed Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:45:59 +0200
Subject: Adding upstream version 16.2.11+ds.

Signed-off-by: Daniel Baumann
---
 qa/suites/rados/singleton-nomsgr/%                 |  0
 qa/suites/rados/singleton-nomsgr/.qa               |  1 +
 qa/suites/rados/singleton-nomsgr/all/.qa           |  1 +
 .../singleton-nomsgr/all/admin_socket_output.yaml  | 27 ++++++++++
 qa/suites/rados/singleton-nomsgr/all/balancer.yaml | 14 +++++
 .../rados/singleton-nomsgr/all/cache-fs-trunc.yaml | 54 +++++++++++++++++++
 .../singleton-nomsgr/all/ceph-kvstore-tool.yaml    | 23 +++++++++
 .../rados/singleton-nomsgr/all/ceph-post-file.yaml | 12 +++++
 .../singleton-nomsgr/all/export-after-evict.yaml   | 40 +++++++++++++++
 .../rados/singleton-nomsgr/all/full-tiering.yaml   | 40 +++++++++++++++
 .../singleton-nomsgr/all/health-warnings.yaml      | 22 ++++++++
 .../all/large-omap-object-warnings.yaml            | 30 +++++++++++
 .../all/lazy_omap_stats_output.yaml                | 18 +++++++
 .../singleton-nomsgr/all/librados_hello_world.yaml | 24 +++++++++
 qa/suites/rados/singleton-nomsgr/all/msgr.yaml     | 23 +++++++++
 .../all/multi-backfill-reject.yaml                 | 50 ++++++++++++++++++
 .../singleton-nomsgr/all/osd_stale_reads.yaml      | 29 +++++++++++
 .../rados/singleton-nomsgr/all/pool-access.yaml    | 15 ++++++
 .../all/recovery-unfound-found.yaml                | 60 ++++++++++++++++++++++
 .../all/version-number-sanity.yaml                 | 15 ++++++
 qa/suites/rados/singleton-nomsgr/mon_election      |  1 +
 qa/suites/rados/singleton-nomsgr/rados.yaml        |  1 +
 .../singleton-nomsgr/supported-random-distro$      |  1 +
 23 files changed, 501 insertions(+)
 create mode 100644 qa/suites/rados/singleton-nomsgr/%
 create mode 120000 qa/suites/rados/singleton-nomsgr/.qa
 create mode 120000 qa/suites/rados/singleton-nomsgr/all/.qa
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/balancer.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/msgr.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
 create mode 100644 qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
 create mode 120000 qa/suites/rados/singleton-nomsgr/mon_election
 create mode 120000 qa/suites/rados/singleton-nomsgr/rados.yaml
 create mode 120000 qa/suites/rados/singleton-nomsgr/supported-random-distro$

(limited to 'qa/suites/rados/singleton-nomsgr')

diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 000000000..e69de29bb
diff --git a/qa/suites/rados/singleton-nomsgr/.qa b/qa/suites/rados/singleton-nomsgr/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/.qa b/qa/suites/rados/singleton-nomsgr/all/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 000000000..04c40197a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,27 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - MDS in read-only mode
+      - force file system read-only
+      - overall HEALTH_
+      - \(FS_DEGRADED\)
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- rgw:
+    - client.0
+- exec:
+    client.0:
+      - ceph_test_admin_socket_output --all
diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
new file mode 100644
index 000000000..d4c6e3ca5
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
@@ -0,0 +1,14 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    fs: xfs
+    log-ignorelist:
+      - \(PG_AVAILABILITY\)
+- cram:
+    clients:
+      client.0:
+        - src/test/cli-integration/balancer/misplaced.t
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 000000000..f998c51c9
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,54 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    log-ignorelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+        debug client: 20
+        debug mds: 20
+        debug ms: 1
+- exec:
+    client.0:
+      - ceph osd pool create data_cache 4
+      - ceph osd tier add cephfs_data data_cache
+      - ceph osd tier cache-mode data_cache writeback
+      - ceph osd tier set-overlay cephfs_data data_cache
+      - ceph osd pool set data_cache hit_set_type bloom
+      - ceph osd pool set data_cache hit_set_count 8
+      - ceph osd pool set data_cache hit_set_period 3600
+      - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+    client.0:
+      - sudo chmod 777 $TESTDIR/mnt.0/
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - truncate --size 0 $TESTDIR/mnt.0/foo
+      - ls -al $TESTDIR/mnt.0/foo
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - cp $TESTDIR/mnt.0/foo /tmp/foo
+      - sync
+      - rados -p data_cache ls -
+      - sleep 10
+      - rados -p data_cache ls -
+      - rados -p data_cache cache-flush-evict-all
+      - rados -p data_cache ls -
+      - sleep 1
+- exec:
+    client.1:
+      - hexdump -C /tmp/foo | head
+      - hexdump -C $TESTDIR/mnt.1/foo | head
+      - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
new file mode 100644
index 000000000..e116b5ae0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
@@ -0,0 +1,23 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    log-ignorelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - cephtool/test_kvstore_tool.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 000000000..530dc42a7
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,12 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - post-file.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 000000000..ee800e5a7
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,40 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    log-ignorelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create base-pool 4
+      - ceph osd pool application enable base-pool rados
+      - ceph osd pool create cache-pool 4
+      - ceph osd tier add base-pool cache-pool
+      - ceph osd tier cache-mode cache-pool writeback
+      - ceph osd tier set-overlay base-pool cache-pool
+      - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+      - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+      - rbd snap create base-pool/bar@snap
+      - rados -p base-pool cache-flush-evict-all
+      - rbd export base-pool/bar $TESTDIR/bar
+      - rbd export base-pool/bar@snap $TESTDIR/snap
+      - cmp $TESTDIR/foo $TESTDIR/bar
+      - cmp $TESTDIR/foo $TESTDIR/snap
+      - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 000000000..8d26cd323
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,40 @@
+# verify #13098 fix
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - is full
+      - overall HEALTH_
+      - \(POOL_FULL\)
+      - \(POOL_NEAR_FULL\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create ec-ca 1 1
+      - ceph osd pool create ec 1 1 erasure default
+      - ceph osd pool application enable ec rados
+      - ceph osd tier add ec ec-ca
+      - ceph osd tier cache-mode ec-ca readproxy
+      - ceph osd tier set-overlay ec ec-ca
+      - ceph osd pool set ec-ca hit_set_type bloom
+      - ceph osd pool set-quota ec-ca max_bytes 20480000
+      - ceph osd pool set-quota ec max_bytes 20480000
+      - ceph osd pool set ec-ca target_max_bytes 20480000
+      - timeout 30 rados -p ec-ca bench 30 write || true
+      - ceph osd pool set-quota ec-ca max_bytes 0
+      - ceph osd pool set-quota ec max_bytes 0
diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 000000000..bc57a9cd9
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,22 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    conf:
+      osd:
+# we may land on ext4
+        osd max object name len: 400
+        osd max object namespace len: 64
+    log-ignorelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- workunit:
+    clients:
+      all:
+        - rados/test_health_warnings.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
new file mode 100644
index 000000000..b08ab343f
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
@@ -0,0 +1,30 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    log-ignorelist:
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_APP_NOT_ENABLED\)
+      - large omap objects
+      - Large omap object found
+      - application not enabled
+    conf:
+      osd:
+        osd scrub backoff ratio: 0
+        osd deep scrub large omap object value sum threshold: 8800000
+        osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_large_omap_detection.py
diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
new file mode 100644
index 000000000..7228522be
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
@@ -0,0 +1,18 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- exec:
+    client.0:
+      - ceph_test_lazy_omap_stats
diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
new file mode 100644
index 000000000..f670a0849
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
@@ -0,0 +1,24 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+    extra_packages:
+      deb:
+        - libradosstriper-dev
+        - librados-dev
+        - libradospp-dev
+      rpm:
+        - libradosstriper-devel
+        - librados-devel
+        - libradospp-devel
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- workunit:
+    clients:
+      all:
+        - rados/test_librados_build.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 000000000..d1852ae2b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,23 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+      - ceph_test_async_driver
+      - ceph_test_msgr
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 15000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 0
+      size: 1 # GB
+overrides:
+  ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    conf:
+      client:
+        debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 000000000..a3ce46e6a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,50 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - osd.3
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    log-ignorelist:
+      - overall HEALTH_
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd min pg log entries: 25
+        osd max pg log entries: 100
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 64
+      - sudo ceph osd pool application enable foo rados
+      - rados -p foo bench 60 write -b 1024 --no-cleanup
+      - sudo ceph osd pool set foo size 3
+      - sudo ceph osd out 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd in 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd pool set foo size 2
+- sleep:
+    duration: 300
diff --git a/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml b/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
new file mode 100644
index 000000000..408268a09
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/osd_stale_reads.yaml
@@ -0,0 +1,29 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(OSD_DOWN\)
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(SLOW_OPS\)
+      - \(PG_AVAILABILITY\)
+      - \(PG_DEGRADED\)
+      - application not enabled
+      - slow request
+    conf:
+      osd:
+        osd scrub backoff ratio: 0
+        osd deep scrub large omap object value sum threshold: 8800000
+        osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- exec:
+    client.0:
+      - ceph_test_osd_stale_read
diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 000000000..6485a6871
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,15 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- workunit:
+    clients:
+      all:
+        - rados/test_pool_access.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
new file mode 100644
index 000000000..9cf4eec89
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
@@ -0,0 +1,60 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 20 # GB
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+    fs: xfs
+    conf:
+      osd:
+        osd recovery sleep: .1
+        osd objectstore: filestore
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(OBJECT_
+      - \(PG_
+      - overall HEALTH
+- exec:
+    osd.0:
+      - ceph osd pool create foo 32
+      - ceph osd pool application enable foo foo
+      - rados -p foo bench 30 write -b 4096 --no-cleanup
+      - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.0]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - sleep 5
+      - rados -p foo bench 3 write -b 4096 --no-cleanup
+      - ceph osd unset noup
+      - sleep 10
+      - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.1]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - ceph osd out 0
+      - sleep 10
+      - ceph osd unset noup
+- ceph.healthy:
+    wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced
+- exec:
+    osd.0:
+      - ceph osd in 0
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml b/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
new file mode 100644
index 000000000..6d48796f0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/version-number-sanity.yaml
@@ -0,0 +1,15 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-ignorelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+    pre-mgr-commands:
+      - sudo ceph config set mgr mgr/devicehealth/enable_monitoring false --force
+- workunit:
+    clients:
+      all:
+        - rados/version_number_sanity.sh
diff --git a/qa/suites/rados/singleton-nomsgr/mon_election b/qa/suites/rados/singleton-nomsgr/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/rados.yaml b/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 120000
index 000000000..d256979c0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/supported-random-distro$ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
new file mode 120000
index 000000000..7cef21eef
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file
--
cgit v1.2.3