Diffstat (limited to 'qa/suites/rados/singleton-nomsgr')
20 files changed, 424 insertions, 0 deletions
diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/%
diff --git a/qa/suites/rados/singleton-nomsgr/.qa b/qa/suites/rados/singleton-nomsgr/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/.qa b/qa/suites/rados/singleton-nomsgr/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 00000000..49f06b9a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,24 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - MDS in read-only mode
+      - force file system read-only
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+- rgw:
+  - client.0
+- exec:
+    client.0:
+      - ceph_test_admin_socket_output --all
diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
new file mode 100644
index 00000000..75410508
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- cram:
+    clients:
+      client.0:
+        - src/test/cli-integration/balancer/misplaced.t
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 00000000..0a4bc498
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,52 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+        debug client: 20
+        debug mds: 20
+        debug ms: 1
+- exec:
+    client.0:
+      - ceph osd pool create data_cache 4
+      - ceph osd tier add cephfs_data data_cache
+      - ceph osd tier cache-mode data_cache writeback
+      - ceph osd tier set-overlay cephfs_data data_cache
+      - ceph osd pool set data_cache hit_set_type bloom
+      - ceph osd pool set data_cache hit_set_count 8
+      - ceph osd pool set data_cache hit_set_period 3600
+      - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+    client.0:
+      - sudo chmod 777 $TESTDIR/mnt.0/
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - truncate --size 0 $TESTDIR/mnt.0/foo
+      - ls -al $TESTDIR/mnt.0/foo
+      - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+      - ls -al $TESTDIR/mnt.0/foo
+      - cp $TESTDIR/mnt.0/foo /tmp/foo
+      - sync
+      - rados -p data_cache ls -
+      - sleep 10
+      - rados -p data_cache ls -
+      - rados -p data_cache cache-flush-evict-all
+      - rados -p data_cache ls -
+      - sleep 1
+- exec:
+    client.1:
+      - hexdump -C /tmp/foo | head
+      - hexdump -C $TESTDIR/mnt.1/foo | head
+      - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
new file mode 100644
index 00000000..a386e74e
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
@@ -0,0 +1,21 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - cephtool/test_kvstore_tool.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 00000000..530dc42a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,12 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+        - post-file.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 00000000..e0887b85
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,38 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create base-pool 4
+      - ceph osd pool application enable base-pool rados
+      - ceph osd pool create cache-pool 4
+      - ceph osd tier add base-pool cache-pool
+      - ceph osd tier cache-mode cache-pool writeback
+      - ceph osd tier set-overlay base-pool cache-pool
+      - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+      - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+      - rbd snap create base-pool/bar@snap
+      - rados -p base-pool cache-flush-evict-all
+      - rbd export base-pool/bar $TESTDIR/bar
+      - rbd export base-pool/bar@snap $TESTDIR/snap
+      - cmp $TESTDIR/foo $TESTDIR/bar
+      - cmp $TESTDIR/foo $TESTDIR/snap
+      - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 00000000..944b2f71
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,38 @@
+# verify #13098 fix
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - is full
+      - overall HEALTH_
+      - \(POOL_FULL\)
+      - \(POOL_NEAR_FULL\)
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - ceph osd pool create ec-ca 1 1
+      - ceph osd pool create ec 1 1 erasure default
+      - ceph osd pool application enable ec rados
+      - ceph osd tier add ec ec-ca
+      - ceph osd tier cache-mode ec-ca readproxy
+      - ceph osd tier set-overlay ec ec-ca
+      - ceph osd pool set ec-ca hit_set_type bloom
+      - ceph osd pool set-quota ec-ca max_bytes 20480000
+      - ceph osd pool set-quota ec max_bytes 20480000
+      - ceph osd pool set ec-ca target_max_bytes 20480000
+      - timeout 30 rados -p ec-ca bench 30 write || true
+      - ceph osd pool set-quota ec-ca max_bytes 0
+      - ceph osd pool set-quota ec max_bytes 0
diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 00000000..a28582fd
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+# we may land on ext4
+        osd max object name len: 400
+        osd max object namespace len: 64
+    log-whitelist:
+      - but it is still running
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+- workunit:
+    clients:
+      all:
+        - rados/test_health_warnings.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
new file mode 100644
index 00000000..62794b4b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
@@ -0,0 +1,27 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_FULL\)
+      - \(MDS_READ_ONLY\)
+      - large omap objects
+      - Large omap object found
+      - application not enabled
+    conf:
+      osd:
+        osd scrub backoff ratio: 0
+        osd deep scrub large omap object value sum threshold: 8800000
+        osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_large_omap_detection.py
diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
new file mode 100644
index 00000000..9fbdf0e0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
@@ -0,0 +1,16 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+- exec:
+    client.0:
+      - ceph_test_lazy_omap_stats
diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
new file mode 100644
index 00000000..2a96b94d
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
@@ -0,0 +1,22 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+      - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+    extra_packages:
+      deb:
+        - libradosstriper-dev
+        - librados-dev
+        - libradospp-dev
+      rpm:
+        - libradosstriper-devel
+        - librados-devel
+        - libradospp-devel
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_librados_build.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 00000000..98b50952
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+      - ceph_test_async_driver
+      - ceph_test_msgr
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 15000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 0
+      size: 1 # GB
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 00000000..9800b5dd
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,48 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - osd.3
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+      - overall HEALTH_
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd min pg log entries: 25
+        osd max pg log entries: 100
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+      - sudo ceph osd pool create foo 64
+      - sudo ceph osd pool application enable foo rados
+      - rados -p foo bench 60 write -b 1024 --no-cleanup
+      - sudo ceph osd pool set foo size 3
+      - sudo ceph osd out 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd in 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+      - sudo ceph osd pool set foo size 2
+- sleep:
+    duration: 300
diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 00000000..c30aebb5
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,13 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+        - rados/test_pool_access.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
new file mode 100644
index 00000000..ce0cbd9f
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
@@ -0,0 +1,58 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 20 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd recovery sleep: .1
+        osd objectstore: filestore
+    log-whitelist:
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(OBJECT_
+      - \(PG_
+      - overall HEALTH
+- exec:
+    osd.0:
+      - ceph osd pool create foo 32
+      - ceph osd pool application enable foo foo
+      - rados -p foo bench 30 write -b 4096 --no-cleanup
+      - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.0]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - sleep 5
+      - rados -p foo bench 3 write -b 4096 --no-cleanup
+      - ceph osd unset noup
+      - sleep 10
+      - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.1]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+      - ceph osd out 0
+      - sleep 10
+      - ceph osd unset noup
+- ceph.healthy:
+    wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced
+- exec:
+    osd.0:
+      - ceph osd in 0
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton-nomsgr/rados.yaml b/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/supported-random-distro$ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file