| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000 |
| commit | 483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch) | |
| tree | e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf | /qa/suites/rados/singleton |
| parent | Initial commit. (diff) | |
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
85 files changed, 1748 insertions, 0 deletions
diff --git a/qa/suites/rados/singleton-bluestore/% b/qa/suites/rados/singleton-bluestore/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/%
diff --git a/qa/suites/rados/singleton-bluestore/.qa b/qa/suites/rados/singleton-bluestore/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/all/.qa b/qa/suites/rados/singleton-bluestore/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/all/cephtool.yaml b/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
new file mode 100644
index 00000000..0567b603
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/all/cephtool.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - had wrong client addr
+    - had wrong cluster addr
+    - must scrub before tier agent can activate
+    - failsafe engaged, dropping updates
+    - failsafe disengaged, no longer dropping updates
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(SMALLER_PG_NUM\)
+    - \(SMALLER_PGP_NUM\)
+    - \(CACHE_POOL_NO_HIT_SET\)
+    - \(CACHE_POOL_NEAR_FULL\)
+    - \(FS_WITH_FAILED_MDS\)
+    - \(FS_DEGRADED\)
+    - \(POOL_BACKFILLFULL\)
+    - \(POOL_FULL\)
+    - \(SMALLER_PGP_NUM\)
+    - \(POOL_NEARFULL\)
+    - \(POOL_APP_NOT_ENABLED\)
+    - \(AUTH_BAD_CAPS\)
+- workunit:
+    clients:
+      all:
+      - cephtool
+      - mon/pool_ops.sh
diff --git a/qa/suites/rados/singleton-bluestore/msgr b/qa/suites/rados/singleton-bluestore/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr
@@ -0,0 +1 @@
+.qa/msgr
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/.qa b/qa/suites/rados/singleton-bluestore/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml b/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+    log-whitelist:
+    - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml b/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
new file mode 100644
index 00000000..59ca5c0f
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 1000
+    log-whitelist:
+    - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/.qa b/qa/suites/rados/singleton-bluestore/objectstore/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml
new file mode 120000
index 00000000..a59cf517
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml
new file mode 120000
index 00000000..4fb2ff6c
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-comp-lz4.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml
new file mode 120000
index 00000000..888caf55
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-comp-snappy.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/rados.yaml b/qa/suites/rados/singleton-bluestore/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-bluestore/supported-random-distro$ b/qa/suites/rados/singleton-bluestore/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton-bluestore/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-flat/.qa b/qa/suites/rados/singleton-flat/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-flat/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-flat/valgrind-leaks.yaml b/qa/suites/rados/singleton-flat/valgrind-leaks.yaml
new file mode 100644
index 00000000..d3180d9b
--- /dev/null
+++ b/qa/suites/rados/singleton-flat/valgrind-leaks.yaml
@@ -0,0 +1,36 @@
+# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126
+os_type: centos
+os_version: '7.8'
+
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+
+overrides:
+  install:
+    ceph:
+      debuginfo: true
+  ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(PG_
+    conf:
+      global:
+        osd heartbeat grace: 40
+        debug deliberately leak memory: true
+        osd max object name len: 460
+        osd max object namespace len: 64
+      mon:
+        mon osd crush smoke test: false
+      osd:
+        osd fast shutdown: false
+    valgrind:
+      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
+      osd: [--tool=memcheck]
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+    expect_valgrind_errors: true
diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/%
diff --git a/qa/suites/rados/singleton-nomsgr/.qa b/qa/suites/rados/singleton-nomsgr/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/.qa b/qa/suites/rados/singleton-nomsgr/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
new file mode 100644
index 00000000..49f06b9a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml
@@ -0,0 +1,24 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+    - MDS in read-only mode
+    - force file system read-only
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_FULL\)
+    - \(MDS_READ_ONLY\)
+    - \(POOL_FULL\)
+tasks:
+- install:
+- ceph:
+- rgw:
+  - client.0
+- exec:
+    client.0:
+    - ceph_test_admin_socket_output --all
diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
new file mode 100644
index 00000000..75410508
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml
@@ -0,0 +1,10 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+- cram:
+    clients:
+      client.0:
+      - src/test/cli-integration/balancer/misplaced.t
diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
new file mode 100644
index 00000000..0a4bc498
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml
@@ -0,0 +1,52 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1]
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+        debug client: 20
+        debug mds: 20
+        debug ms: 1
+- exec:
+    client.0:
+    - ceph osd pool create data_cache 4
+    - ceph osd tier add cephfs_data data_cache
+    - ceph osd tier cache-mode data_cache writeback
+    - ceph osd tier set-overlay cephfs_data data_cache
+    - ceph osd pool set data_cache hit_set_type bloom
+    - ceph osd pool set data_cache hit_set_count 8
+    - ceph osd pool set data_cache hit_set_period 3600
+    - ceph osd pool set data_cache min_read_recency_for_promote 0
+- ceph-fuse:
+- exec:
+    client.0:
+    - sudo chmod 777 $TESTDIR/mnt.0/
+    - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+    - ls -al $TESTDIR/mnt.0/foo
+    - truncate --size 0 $TESTDIR/mnt.0/foo
+    - ls -al $TESTDIR/mnt.0/foo
+    - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5
+    - ls -al $TESTDIR/mnt.0/foo
+    - cp $TESTDIR/mnt.0/foo /tmp/foo
+    - sync
+    - rados -p data_cache ls -
+    - sleep 10
+    - rados -p data_cache ls -
+    - rados -p data_cache cache-flush-evict-all
+    - rados -p data_cache ls -
+    - sleep 1
+- exec:
+    client.1:
+    - hexdump -C /tmp/foo | head
+    - hexdump -C $TESTDIR/mnt.1/foo | head
+    - cmp $TESTDIR/mnt.1/foo /tmp/foo
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
new file mode 100644
index 00000000..a386e74e
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml
@@ -0,0 +1,21 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+
+overrides:
+  ceph:
+    log-whitelist:
+    - but it is still running
+    - overall HEALTH_
+    - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+      - cephtool/test_kvstore_tool.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
new file mode 100644
index 00000000..530dc42a
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml
@@ -0,0 +1,12 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+      - post-file.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
new file mode 100644
index 00000000..e0887b85
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml
@@ -0,0 +1,38 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(CACHE_POOL_NO_HIT_SET\)
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+    - ceph osd pool create base-pool 4
+    - ceph osd pool application enable base-pool rados
+    - ceph osd pool create cache-pool 4
+    - ceph osd tier add base-pool cache-pool
+    - ceph osd tier cache-mode cache-pool writeback
+    - ceph osd tier set-overlay base-pool cache-pool
+    - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1
+    - rbd import --image-format 2 $TESTDIR/foo base-pool/bar
+    - rbd snap create base-pool/bar@snap
+    - rados -p base-pool cache-flush-evict-all
+    - rbd export base-pool/bar $TESTDIR/bar
+    - rbd export base-pool/bar@snap $TESTDIR/snap
+    - cmp $TESTDIR/foo $TESTDIR/bar
+    - cmp $TESTDIR/foo $TESTDIR/snap
+    - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap
diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
new file mode 100644
index 00000000..944b2f71
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml
@@ -0,0 +1,38 @@
+# verify #13098 fix
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+    - is full
+    - overall HEALTH_
+    - \(POOL_FULL\)
+    - \(POOL_NEAR_FULL\)
+    - \(CACHE_POOL_NO_HIT_SET\)
+    - \(CACHE_POOL_NEAR_FULL\)
+tasks:
+- install:
+- ceph:
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+    - ceph osd pool create ec-ca 1 1
+    - ceph osd pool create ec 1 1 erasure default
+    - ceph osd pool application enable ec rados
+    - ceph osd tier add ec ec-ca
+    - ceph osd tier cache-mode ec-ca readproxy
+    - ceph osd tier set-overlay ec ec-ca
+    - ceph osd pool set ec-ca hit_set_type bloom
+    - ceph osd pool set-quota ec-ca max_bytes 20480000
+    - ceph osd pool set-quota ec max_bytes 20480000
+    - ceph osd pool set ec-ca target_max_bytes 20480000
+    - timeout 30 rados -p ec-ca bench 30 write || true
+    - ceph osd pool set-quota ec-ca max_bytes 0
+    - ceph osd pool set-quota ec max_bytes 0
diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
new file mode 100644
index 00000000..a28582fd
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml
@@ -0,0 +1,20 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0]
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+# we may land on ext4
+        osd max object name len: 400
+        osd max object namespace len: 64
+    log-whitelist:
+    - but it is still running
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+- workunit:
+    clients:
+      all:
+      - rados/test_health_warnings.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
new file mode 100644
index 00000000..62794b4b
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml
@@ -0,0 +1,27 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_FULL\)
+    - \(MDS_READ_ONLY\)
+    - large omap objects
+    - Large omap object found
+    - application not enabled
+    conf:
+      osd:
+        osd scrub backoff ratio: 0
+        osd deep scrub large omap object value sum threshold: 8800000
+        osd deep scrub large omap object key threshold: 20000
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+      - rados/test_large_omap_detection.py
diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
new file mode 100644
index 00000000..9fbdf0e0
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml
@@ -0,0 +1,16 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+    - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+- ceph:
+- exec:
+    client.0:
+    - ceph_test_lazy_omap_stats
diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
new file mode 100644
index 00000000..2a96b94d
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml
@@ -0,0 +1,22 @@
+roles:
+- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0]
+overrides:
+  ceph:
+    log-whitelist:
+    - \(POOL_APP_NOT_ENABLED\)
+tasks:
+- install:
+    extra_packages:
+      deb:
+      - libradosstriper-dev
+      - librados-dev
+      - libradospp-dev
+      rpm:
+      - libradosstriper-devel
+      - librados-devel
+      - libradospp-devel
+- ceph:
+- workunit:
+    clients:
+      all:
+      - rados/test_librados_build.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
new file mode 100644
index 00000000..98b50952
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml
@@ -0,0 +1,21 @@
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- exec:
+    client.0:
+    - ceph_test_async_driver
+    - ceph_test_msgr
+openstack:
+  - machine:
+      disk: 40 # GB
+      ram: 15000 # MB
+      cpus: 1
+    volumes: # attached to each instance
+      count: 0
+      size: 1 # GB
+overrides:
+  ceph:
+    conf:
+      client:
+        debug ms: 20
diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
new file mode 100644
index 00000000..9800b5dd
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml
@@ -0,0 +1,48 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+- - osd.3
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(PG_
+    - \(OSD_
+    - \(OBJECT_
+    conf:
+      osd:
+        osd debug reject backfill probability: .3
+        osd min pg log entries: 25
+        osd max pg log entries: 100
+        osd max object name len: 460
+        osd max object namespace len: 64
+- exec:
+    client.0:
+    - sudo ceph osd pool create foo 64
+    - sudo ceph osd pool application enable foo rados
+    - rados -p foo bench 60 write -b 1024 --no-cleanup
+    - sudo ceph osd pool set foo size 3
+    - sudo ceph osd out 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+    - sudo ceph osd in 0 1
+- sleep:
+    duration: 60
+- exec:
+    client.0:
+    - sudo ceph osd pool set foo size 2
+- sleep:
+    duration: 300
diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
new file mode 100644
index 00000000..c30aebb5
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml
@@ -0,0 +1,13 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, client.0]
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+      - rados/test_pool_access.sh
diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
new file mode 100644
index 00000000..ce0cbd9f
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml
@@ -0,0 +1,58 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 20 # GB
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd recovery sleep: .1
+        osd objectstore: filestore
+    log-whitelist:
+    - \(POOL_APP_NOT_ENABLED\)
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(OBJECT_
+    - \(PG_
+    - overall HEALTH
+- exec:
+    osd.0:
+    - ceph osd pool create foo 32
+    - ceph osd pool application enable foo foo
+    - rados -p foo bench 30 write -b 4096 --no-cleanup
+    - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.0]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+    - sleep 5
+    - rados -p foo bench 3 write -b 4096 --no-cleanup
+    - ceph osd unset noup
+    - sleep 10
+    - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.1]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+    - ceph osd out 0
+    - sleep 10
+    - ceph osd unset noup
+- ceph.healthy:
+    wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced
+- exec:
+    osd.0:
+    - ceph osd in 0
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton-nomsgr/rados.yaml b/qa/suites/rados/singleton-nomsgr/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton-nomsgr/supported-random-distro$ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/% b/qa/suites/rados/singleton/%
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton/%
diff --git a/qa/suites/rados/singleton/.qa b/qa/suites/rados/singleton/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/.qa b/qa/suites/rados/singleton/all/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/all/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/admin-socket.yaml b/qa/suites/rados/singleton/all/admin-socket.yaml
new file mode 100644
index 00000000..13af8131
--- /dev/null
+++ b/qa/suites/rados/singleton/all/admin-socket.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - client.a
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- admin_socket:
+    osd.0:
+      version:
+      git_version:
+      help:
+      config show:
+      config help:
+      config set filestore_dump_file /tmp/foo:
+      perf dump:
+      perf schema:
+      get_heap_property tcmalloc.max_total_thread_cache_byte:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864:
+      set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432:
diff --git a/qa/suites/rados/singleton/all/deduptool.yaml b/qa/suites/rados/singleton/all/deduptool.yaml
new file mode 100644
index 00000000..f2c54f1a
--- /dev/null
+++ b/qa/suites/rados/singleton/all/deduptool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - had wrong client addr
+    - had wrong cluster addr
+    - reached quota
+    - overall HEALTH_
+    - \(POOL_FULL\)
+    - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+      - rados/test_dedup_tool.sh
diff --git a/qa/suites/rados/singleton/all/divergent_priors.yaml b/qa/suites/rados/singleton/all/divergent_priors.yaml
new file mode 100644
index 00000000..743d73d4
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- divergent_priors:
diff --git a/qa/suites/rados/singleton/all/divergent_priors2.yaml b/qa/suites/rados/singleton/all/divergent_priors2.yaml
new file mode 100644
index 00000000..2da2c466
--- /dev/null
+++ b/qa/suites/rados/singleton/all/divergent_priors2.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+
+overrides:
+  ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    - \(POOL_APP_NOT_ENABLED\)
+
+tasks:
+- install:
+- ceph:
+- divergent_priors2:
diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml
new file mode 100644
index 00000000..59085ffa
--- /dev/null
+++ b/qa/suites/rados/singleton/all/dump-stuck.yaml
@@ -0,0 +1,19 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+- dump_stuck:
diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
new file mode 100644
index 00000000..aeb4b278
--- /dev/null
+++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    - \(SLOW_OPS\)
+    - slow request
+- ec_lost_unfound:
diff --git a/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
new file mode 100644
index 00000000..e8201ee0
--- /dev/null
+++ b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- workunit:
+    clients:
+      all:
+      - erasure-code/encode-decode-non-regression.sh
diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
new file mode 100644
index 00000000..636cb944
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    - \(SLOW_OPS\)
+    - slow request
+- rep_lost_unfound_delete:
diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml
new file mode 100644
index 00000000..2f60db16
--- /dev/null
+++ b/qa/suites/rados/singleton/all/lost-unfound.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    - \(SLOW_OPS\)
+    - slow request
+- lost_unfound:
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
new file mode 100644
index 00000000..b8a7feae
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: False
+    conf:
+      mon:
+        osd pool default size: 2
+      osd:
+        mon max pg per osd : 2
+        osd max pg per osd hard ratio : 1
+    log-whitelist:
+    - \(TOO_FEW_PGS\)
+    - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+    test_create_from_mon: True
+    pg_num: 2
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
new file mode 100644
index 00000000..8ffc9a31
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: False
+    conf:
+      mon:
+        osd pool default size: 2
+      osd:
+        mon max pg per osd : 1
+        osd max pg per osd hard ratio : 1
+    log-whitelist:
+    - \(TOO_FEW_PGS\)
+    - \(PG_
+    - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+    test_create_from_mon: False
+    pg_num: 1
+    pool_size: 2
+    from_primary: True
diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
new file mode 100644
index 00000000..8da365dd
--- /dev/null
+++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+overrides:
+  ceph:
+    create_rbd_pool: False
+    conf:
+      mon:
+        osd pool default size: 2
+      osd:
+        mon max pg per osd : 1
+        osd max pg per osd hard ratio : 1
+    log-whitelist:
+    - \(TOO_FEW_PGS\)
+    - \(PG_
+    - \(PENDING_CREATING_PGS\)
+tasks:
+- install:
+- ceph:
+- osd_max_pg_per_osd:
+    test_create_from_mon: False
+    pg_num: 1
+    pool_size: 2
+    from_primary: False
diff --git a/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
new file mode 100644
index 00000000..ae4a5d2e
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-auth-caps.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(AUTH_BAD_CAPS\)
+- workunit:
+    clients:
+      all:
+      - mon/auth_caps.sh
diff --git a/qa/suites/rados/singleton/all/mon-config-key-caps.yaml b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
new file mode 100644
index 00000000..0b0b95c5
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml
@@ -0,0 +1,17 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(AUTH_BAD_CAPS\)
+- workunit:
+    clients:
+      all:
+      - mon/test_config_key_caps.sh
diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml
new file mode 100644
index 00000000..7bb4f650
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+      - mon/test_mon_config_key.py
diff --git a/qa/suites/rados/singleton/all/mon-config.yaml b/qa/suites/rados/singleton/all/mon-config.yaml
new file mode 100644
index 00000000..2d9de8bb
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-config.yaml
@@ -0,0 +1,20 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+- workunit:
+    clients:
+      all:
+      - mon/config.sh
diff --git a/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
new file mode 100644
index 00000000..7f9dd495
--- /dev/null
+++ b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled
@@ -0,0 +1,152 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+  - osd.8
+  - osd.9
+  - osd.10
+  - osd.11
+  - osd.12
+  - osd.13
+  - osd.14
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 1 # GB
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon memory target: 134217728 # reduced to 128_M
+        rocksdb cache size: 67108864 # reduced to 64_M
+        mon osd cache size: 100000
+        mon osd cache size min: 134217728
+      osd:
+        osd memory target: 1610612736 # reduced to 1.5_G
+        osd objectstore: bluestore
+        debug bluestore: 20
+        osd scrub min interval: 60
+        osd scrub max interval: 120
+        osd max backfills: 9
+
+tasks:
+- install:
+    branch: wip-sseshasa2-testing-2019-07-30-1825 # change as appropriate
+- ceph:
+    create_rbd_pool: false
+    log-whitelist:
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(POOL_
+    - \(CACHE_POOL_
+    - \(OBJECT_
+    - \(SLOW_OPS\)
+    - \(REQUEST_SLOW\)
+    - \(TOO_FEW_PGS\)
+    - slow request
+- interactive:
+- parallel:
+  - log-mon-rss
+  - stress-tasks
+  - benchload
+- exec:
+    client.0:
+    - "ceph_test_mon_memory_target 134217728" # mon memory target
+    - "ceph_test_mon_rss_usage 134217728"
+log-mon-rss:
+- background_exec:
+    client.0:
+    - while true
+    - do /usr/bin/ceph_test_log_rss_usage ceph-mon >> /var/log/ceph/ceph-mon-rss-usage.log
+    - sleep 300 # log rss usage every 5 mins. May be modified accordingly
+    - done
+- exec:
+    client.0:
+    - sleep 37860 # sum total of the radosbench test times below plus 60 secs
+benchload: # The total radosbench test below translates to 10.5 hrs
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+  - radosbench:
+      clients: [client.0]
+      time: 1800
+stress-tasks:
+- thrashosds:
+    op_delay: 1
+    bdev_inject_crash: 1
+    bdev_inject_crash_probability: .8
+    chance_down: 80
+    chance_pgnum_grow: 3
+    chance_pgpnum_fix: 1
+    chance_thrash_cluster_full: 0
+    chance_thrash_pg_upmap: 3
+    chance_thrash_pg_upmap_items: 3
+    min_in: 2
diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml
new file mode 100644
index 00000000..5b374071
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-backfill.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    conf:
+      osd:
+        osd min pg log entries: 5
+- osd_backfill:
diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
new file mode 100644
index 00000000..ed5b216b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml
@@ -0,0 +1,28 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_
+    conf:
+      osd:
+        osd min pg log entries: 5
+        osd_fast_fail_on_connection_refused: false
+- osd_recovery.test_incomplete_pgs:
diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml
new file mode 100644
index 00000000..d937a8db
--- /dev/null
+++ b/qa/suites/rados/singleton/all/osd-recovery.yaml
@@ -0,0 +1,30 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_DEGRADED\)
+    - \(SLOW_OPS\)
+    - slow request
+    conf:
+      osd:
+        osd min pg log entries: 5
+        osd pg log trim min: 0
+        osd_fast_fail_on_connection_refused: false
+- osd_recovery:
diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml
new file mode 100644
index 00000000..645034a4
--- /dev/null
+++ b/qa/suites/rados/singleton/all/peer.yaml
@@ -0,0 +1,25 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size : 1
+    log-whitelist:
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+- peer:
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
new file mode 100644
index 00000000..2784b7e3
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml
@@ -0,0 +1,42 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - client.0
+- - mon.b
+  - mon.c
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    create_rbd_pool: false
+    log-whitelist:
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(POOL_
+    - \(CACHE_POOL_
+    - \(OBJECT_
+    - \(SLOW_OPS\)
+    - \(REQUEST_SLOW\)
+    - \(TOO_FEW_PGS\)
+    - slow request
+- exec:
+    client.0:
+    - ceph progress off
+
+- workunit:
+    clients:
+      all:
+      - mon/pg_autoscaler.sh
diff --git a/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
new file mode 100644
index 00000000..72e18d52
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-autoscaler.yaml
@@ -0,0 +1,38 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+  - client.0
+- - mon.b
+  - mon.c
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+openstack:
+  - volumes: # attached to each instance
+      count: 4
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    create_rbd_pool: false
+    log-whitelist:
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(POOL_
+    - \(CACHE_POOL_
+    - \(OBJECT_
+    - \(SLOW_OPS\)
+    - \(REQUEST_SLOW\)
+    - \(TOO_FEW_PGS\)
+    - slow request
+- workunit:
+    clients:
+      all:
+      - mon/pg_autoscaler.sh
diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
new file mode 100644
index 00000000..3ada5518
--- /dev/null
+++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml
@@ -0,0 +1,34 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - slow request
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+- exec:
+    client.0:
+    - sudo ceph osd pool create foo 128 128
+    - sudo ceph osd pool application enable foo rados
+    - sleep 5
+    - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal
+    - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it
+- ceph.wait_for_failure: [osd.0]
+- exec:
+    client.0:
+    - sudo ceph osd down 0
+- ceph.restart: [osd.0]
+- ceph.healthy:
diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml
new file mode 100644
index 00000000..18277953
--- /dev/null
+++ b/qa/suites/rados/singleton/all/radostool.yaml
@@ -0,0 +1,26 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 2
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - had wrong client addr
+    - had wrong cluster addr
+    - reached quota
+    - overall HEALTH_
+    - \(POOL_FULL\)
+    - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+      - rados/test_rados_tool.sh
diff --git a/qa/suites/rados/singleton/all/random-eio.yaml b/qa/suites/rados/singleton/all/random-eio.yaml
new file mode 100644
index 00000000..5df910b8
--- /dev/null
+++ b/qa/suites/rados/singleton/all/random-eio.yaml
@@ -0,0 +1,44 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - missing primary copy of
+    - objects unfound and apparently lost
+    - had a read error
+    - overall HEALTH_
+    - \(POOL_APP_NOT_ENABLED\)
+    - \(PG_DEGRADED\)
+    - \(OSD_TOO_MANY_REPAIRS\)
+- full_sequential:
+  - exec:
+      client.0:
+      - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33
+      - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33
+      - sudo ceph osd pool create test 16 16
+      - sudo ceph osd pool set test size 3
+      - sudo ceph pg dump pgs --format=json-pretty
+  - radosbench:
+      clients: [client.0]
+      time: 360
+      type: rand
+      objectsize: 1048576
+      pool: test
+      create_pool: false
+  - exec:
+      client.0:
+      - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0
+      - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0
diff --git a/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
new file mode 100644
index 00000000..cc1c6809
--- /dev/null
+++ b/qa/suites/rados/singleton/all/rebuild-mondb.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - no reply from
+    - overall HEALTH_
+    - \(MON_DOWN\)
+    - \(MGR_DOWN\)
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 30
+  - rebuild_mondb:
+  - radosbench:
+      clients: [client.0]
+      time: 30
diff --git a/qa/suites/rados/singleton/all/recovery-preemption.yaml b/qa/suites/rados/singleton/all/recovery-preemption.yaml
new file mode 100644
index 00000000..fbf1772c
--- /dev/null
+++ b/qa/suites/rados/singleton/all/recovery-preemption.yaml
@@ -0,0 +1,57 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 20 # GB
+tasks:
+- install:
+- ceph:
+    conf:
+      osd:
+        osd recovery sleep: .1
+        osd min pg log entries: 10
+        osd max pg log entries: 1000
+        osd pg log trim min: 10
+    log-whitelist:
+    - \(POOL_APP_NOT_ENABLED\)
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(OBJECT_
+    - \(PG_
+    - \(SLOW_OPS\)
+    - overall HEALTH
+    - slow request
+- exec:
+    osd.0:
+    - ceph osd pool create foo 128
+    - ceph osd pool application enable foo foo
+    - sleep 5
+- ceph.healthy:
+- exec:
+    osd.0:
+    - rados -p foo bench 30 write -b 4096 --no-cleanup
+    - ceph osd out 0
+    - sleep 5
+    - ceph osd set noup
+- ceph.restart:
+    daemons: [osd.1]
+    wait-for-up: false
+    wait-for-healthy: false
+- exec:
+    osd.0:
+    - rados -p foo bench 3 write -b 4096 --no-cleanup
+    - ceph osd unset noup
+    - sleep 10
+    - for f in 0 1 2 3 ; do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done
+- ceph.healthy:
+- exec:
+    osd.0:
+    - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log
diff --git a/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
new file mode 100644
index 00000000..3eddce82
--- /dev/null
+++ b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml
@@ -0,0 +1,17 @@
+roles:
+- [mon.a, mgr.x]
+- [osd.0, osd.1, osd.2, client.0]
+
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    log-whitelist:
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_DEGRADED\)
+    - \(POOL_APP_NOT_ENABLED\)
+- resolve_stuck_peering:
+
diff --git a/qa/suites/rados/singleton/all/test-crash.yaml b/qa/suites/rados/singleton/all/test-crash.yaml
new file mode 100644
index 00000000..8002deaa
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test-crash.yaml
@@ -0,0 +1,15 @@
+roles:
+  - [client.0, mon.a, mgr.x, osd.0, osd.1, osd.2]
+
+tasks:
+  - install:
+  - ceph:
+      log-whitelist:
+      - Reduced data availability
+      - OSD_.*DOWN
+      - \(RECENT_CRASH\)
+  - workunit:
+      clients:
+        client.0:
+        - rados/test_crash.sh
+  - ceph.restart: [osd.*]
diff --git a/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
new file mode 100644
index 00000000..42c8ae39
--- /dev/null
+++ b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    fs: ext4
+    conf:
+      global:
+        osd max object name len: 460
+        osd max object namespace len: 64
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - overall HEALTH_
+    - \(POOL_APP_NOT_ENABLED\)
+- workunit:
+    clients:
+      all:
+      - rados/test_envlibrados_for_rocksdb.sh
diff --git a/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
new file mode 100644
index 00000000..5cd32bd5
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml
@@ -0,0 +1,50 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+override:
+  ceph:
+    conf:
+      mon:
+        osd default pool size: 3
+        osd min pg log entries: 5
+        osd max pg log entries: 10
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - missing primary copy of
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(SLOW_OPS\)
+    - \(PG_
+    - \(OBJECT_MISPLACED\)
+    - \(OSD_
+    - \(OBJECT_
+    - \(TOO_FEW_PGS\)
+    - \(POOL_BACKFILLFULL\)
+    - slow request
+- thrashosds:
+    op_delay: 30
+    clean_interval: 120
+    chance_down: .75
+    min_live: 5
+    min_in: 5
+    chance_test_backfill_full: .5
+- radosbench:
+    clients: [client.0]
+    time: 1800
+    type: rand
+    objectsize: 1048576
diff --git a/qa/suites/rados/singleton/all/thrash-eio.yaml b/qa/suites/rados/singleton/all/thrash-eio.yaml
new file mode 100644
index 00000000..0afb6c86
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-eio.yaml
@@ -0,0 +1,47 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+override:
+  ceph:
+    conf:
+      mon:
+        osd default pool size: 3
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - missing primary copy of
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(SLOW_OPS\)
+    - \(PG_
+    - \(OBJECT_MISPLACED\)
+    - \(OSD_
+    - \(OBJECT_
+    - \(TOO_FEW_PGS\)
+    - slow request
+- thrashosds:
+    op_delay: 30
+    clean_interval: 120
+    chance_down: .5
+    random_eio: .33
+    min_live: 5
+    min_in: 5
+- radosbench:
+    clients: [client.0]
+    time: 720
+    type: rand
+    objectsize: 1048576
diff --git a/qa/suites/rados/singleton/all/thrash-rados/+ b/qa/suites/rados/singleton/all/thrash-rados/+
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/+
diff --git a/qa/suites/rados/singleton/all/thrash-rados/.qa b/qa/suites/rados/singleton/all/thrash-rados/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
new file mode 100644
index 00000000..37be8df9
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml
@@ -0,0 +1,27 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+- thrashosds:
+    op_delay: 30
+    clean_interval: 120
+    chance_down: .5
+- workunit:
+    clients:
+      all:
+      - rados/load-gen-mix-small.sh
diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
new file mode 120000
index 00000000..9124eb1a
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
new file mode 100644
index 00000000..c0b27075
--- /dev/null
+++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml
@@ -0,0 +1,70 @@
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+- - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
+tasks:
+- install:
+- ceph:
+    log-whitelist:
+    - but it is still running
+    - slow request
+    - overall HEALTH_
+    - \(CACHE_POOL_
+- exec:
+    client.0:
+    - sudo ceph osd pool create base 4
+    - sudo ceph osd pool application enable base rados
+    - sudo ceph osd pool create cache 4
+    - sudo ceph osd tier add base cache
+    - sudo ceph osd tier cache-mode cache writeback
+    - sudo ceph osd tier set-overlay base cache
+    - sudo ceph osd pool set cache hit_set_type bloom
+    - sudo ceph osd pool set cache hit_set_count 8
+    - sudo ceph osd pool set cache hit_set_period 60
+    - sudo ceph osd pool set cache target_max_objects 500
+- background_exec:
+    mon.a:
+    - while true
+    - do sleep 30
+    - echo proxy
+    - sudo ceph osd tier cache-mode cache proxy
+    - sleep 10
+    - sudo ceph osd pool set cache cache_target_full_ratio .001
+    - echo cache-try-flush-evict-all
+    - rados -p cache cache-try-flush-evict-all
+    - sleep 5
+    - echo cache-flush-evict-all
+    - rados -p cache cache-flush-evict-all
+    - sleep 5
+    - echo remove overlay
+    - sudo ceph osd tier remove-overlay base
+    - sleep 20
+    - echo add writeback overlay
+    - sudo ceph osd tier cache-mode cache writeback
+    - sudo ceph osd pool set cache cache_target_full_ratio .8
+    - sudo ceph osd tier set-overlay base cache
+    - sleep 30
+    - sudo ceph osd tier cache-mode cache readproxy
+    - done
+- rados:
+    clients: [client.0]
+    pools: [base]
+    max_seconds: 600
+    ops: 400000
+    objects: 10000
+    size: 1024
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      copy_from: 50
diff --git a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
new file mode 100644
index 00000000..48ef78ff
--- /dev/null
+++ b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml
@@ -0,0 +1,32 @@
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - client.0
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 10 # GB
+tasks:
+- install:
+- ceph:
+    config:
+      global:
+        osd pool default min size : 1
+      client:
+        debug ms: 1
+        debug objecter: 20
+        debug rados: 20
+    log-whitelist:
+    - objects unfound and apparently lost
+    - overall HEALTH_
+    - \(OSDMAP_FLAGS\)
+    - \(OSD_
+    - \(PG_
+    - \(OBJECT_DEGRADED\)
+- watch_notify_same_primary:
+    clients: [client.0]
diff --git a/qa/suites/rados/singleton/msgr b/qa/suites/rados/singleton/msgr
new file mode 120000
index 00000000..57bee80d
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr
@@ -0,0 +1 @@
+.qa/msgr
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr-failures/.qa b/qa/suites/rados/singleton/msgr-failures/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/msgr-failures/few.yaml b/qa/suites/rados/singleton/msgr-failures/few.yaml
new file mode 100644
index 00000000..4326fe23
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/few.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 5000
+    log-whitelist:
+    - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton/msgr-failures/many.yaml b/qa/suites/rados/singleton/msgr-failures/many.yaml
new file mode 100644
index 00000000..20aeb4df
--- /dev/null
+++ b/qa/suites/rados/singleton/msgr-failures/many.yaml
@@ -0,0 +1,11 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        ms inject socket failures: 1000
+        mon mgr beacon grace: 90
+        mon client hunt interval max multiple: 2
+      mgr:
+        debug monc: 10
+    log-whitelist:
+    - \(OSD_SLOW_PING_TIME
diff --git a/qa/suites/rados/singleton/objectstore b/qa/suites/rados/singleton/objectstore
new file mode 120000
index 00000000..c40bd326
--- /dev/null
+++ b/qa/suites/rados/singleton/objectstore
@@ -0,0 +1 @@
+.qa/objectstore
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/rados.yaml b/qa/suites/rados/singleton/rados.yaml
new file mode 120000
index 00000000..d256979c
--- /dev/null
+++ b/qa/suites/rados/singleton/rados.yaml
@@ -0,0 +1 @@
+.qa/config/rados.yaml
\ No newline at end of file
diff --git a/qa/suites/rados/singleton/supported-random-distro$ b/qa/suites/rados/singleton/supported-random-distro$
new file mode 120000
index 00000000..7cef21ee
--- /dev/null
+++ b/qa/suites/rados/singleton/supported-random-distro$
@@ -0,0 +1 @@
+../basic/supported-random-distro$
\ No newline at end of file