author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /qa/suites/upgrade
parent     Initial commit. (diff)
download   ceph-19fcec84d8d7d21e796c7624e521b60d28ee21ed.tar.xz
           ceph-19fcec84d8d7d21e796c7624e521b60d28ee21ed.zip
Adding upstream version 16.2.11+ds.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/suites/upgrade')
279 files changed, 2991 insertions, 0 deletions
diff --git a/qa/suites/upgrade/.qa b/qa/suites/upgrade/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/cephfs b/qa/suites/upgrade/cephfs
new file mode 120000
index 000000000..1ff68fa8b
--- /dev/null
+++ b/qa/suites/upgrade/cephfs
@@ -0,0 +1 @@
+.qa/suites/fs/upgrade/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/% b/qa/suites/upgrade/nautilus-x-singleton/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/%
diff --git a/qa/suites/upgrade/nautilus-x-singleton/.qa b/qa/suites/upgrade/nautilus-x-singleton/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/0-cluster/+ b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/+
diff --git a/qa/suites/upgrade/nautilus-x-singleton/0-cluster/.qa b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/0-cluster/openstack.yaml b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/openstack.yaml
new file mode 100644
index 000000000..a0d5c2019
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/nautilus-x-singleton/0-cluster/start.yaml b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/start.yaml
new file mode 100644
index 000000000..ee3f97ad5
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/0-cluster/start.yaml
@@ -0,0 +1,39 @@
+meta:
+- desc: |
+    Run ceph on two nodes,
+    with a separate client-only node.
+    Use xfs beneath the osds.
+overrides:
+  ceph:
+    mon_bind_addrvec: false
+    mon_bind_msgr2: false
+    fs: xfs
+    conf:
+      global:
+        ms dump corrupt message level: 0
+        ms bind msgr2: false
+        bluestore warn on no per pool omap: false
+        mon pg warn min per osd: 0
+      mds:
+        debug ms: 1
+        debug mds: 20
+roles:
+- - mon.a
+  - mgr.x
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+- - mon.b
+  - mgr.y
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+- - mon.c
+  - osd.8
+  - osd.9
+  - osd.10
+  - osd.11
+- - client.0
diff --git a/qa/suites/upgrade/nautilus-x-singleton/1-install/.qa b/qa/suites/upgrade/nautilus-x-singleton/1-install/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/1-install/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml b/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml
new file mode 100644
index 000000000..ecb1035b6
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/1-install/nautilus.yaml
@@ -0,0 +1,30 @@
+overrides:
+  ceph:
+    log-ignorelist:
+    - \(MON_DOWN\)
+    - \(MGR_DOWN\)
+    - slow request
+    - Not found or unloadable
+    - evicting unresponsive client
+meta:
+- desc: install ceph/nautilus latest
+tasks:
+- install:
+    branch: nautilus
+    exclude_packages:
+      - ceph-mgr-cephadm
+      - cephadm
+- print: "**** done install nautilus"
+- ceph:
+- print: "**** done ceph"
+
+# create a hit set test pool that will generate hit sets prior to octopus
+- exec:
+    mon.a:
+      - sudo ceph osd pool create test-hit-set-base 32
+      - sudo ceph osd pool create test-hit-set-cache 32
+      - sudo ceph osd tier add test-hit-set-base test-hit-set-cache
+      - sudo ceph osd pool set test-hit-set-cache hit_set_type bloom
+      - sudo ceph osd pool set test-hit-set-cache hit_set_count 32
+      - sudo ceph osd pool set test-hit-set-cache hit_set_period 15
+      - rados -p test-hit-set-cache bench 30 write -b 1
diff --git a/qa/suites/upgrade/nautilus-x-singleton/2-partial-upgrade/.qa b/qa/suites/upgrade/nautilus-x-singleton/2-partial-upgrade/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/2-partial-upgrade/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/nautilus-x-singleton/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 000000000..90c28d46f
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,20 @@
+meta:
+- desc: |
+    install upgrade ceph/-x on one node only
+    1st half
+    restart : osd.0,1,2,3,4,5
+tasks:
+- install.upgrade:
+    mon.a:
+    mon.b:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [mgr.x, mgr.y]
+- ceph.restart:
+    daemons: [mon.a, mon.b]
+    wait-for-healthy: false
+    mon-health-to-clog: false
+- ceph.restart:
+    daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7]
+    wait-for-healthy: false
+- print: "**** done ceph.restart 1st 2/3s"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/3-thrash/.qa b/qa/suites/upgrade/nautilus-x-singleton/3-thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/3-thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml b/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml
new file mode 100644
index 000000000..5360e867f
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/3-thrash/default.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+    randomly kill and revive osd
+    small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-ignorelist:
+    - but it is still running
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- parallel:
+  - split_tasks
+split_tasks:
+  sequential:
+  - thrashosds:
+      disable_objectstore_tool_tests: true
+      timeout: 1200
+      chance_pgnum_grow: 1
+      chance_pgpnum_fix: 1
+      aggressive_pg_num_changes: false
+  - print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/4-workload/+ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/+
diff --git a/qa/suites/upgrade/nautilus-x-singleton/4-workload/.qa b/qa/suites/upgrade/nautilus-x-singleton/4-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/4-workload/rbd-cls.yaml b/qa/suites/upgrade/nautilus-x-singleton/4-workload/rbd-cls.yaml
new file mode 100644
index 000000000..31ca77ecc
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/rbd-cls.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    run basic cls tests for rbd
+split_tasks:
+  sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - cls/test_cls_rbd.sh
+  - print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/nautilus-x-singleton/4-workload/rbd-import-export.yaml
new file mode 100644
index 000000000..489ef9b37
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/rbd-import-export.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+    run basic import/export cli tests for rbd
+split_tasks:
+  sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - rbd/import_export.sh
+      env:
+        RBD_CREATE_ARGS: --new-format
+  - print: "**** done rbd/import_export.sh 5-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/4-workload/readwrite.yaml b/qa/suites/upgrade/nautilus-x-singleton/4-workload/readwrite.yaml
new file mode 100644
index 000000000..8833d4d87
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/readwrite.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool,
+    using only reads, writes, and deletes
+split_tasks:
+  sequential:
+  - full_sequential:
+    - rados:
+        clients: [client.0]
+        ops: 4000
+        objects: 500
+        write_append_excl: false
+        op_weights:
+          read: 45
+          write: 45
+          delete: 10
+  - print: "**** done rados/readwrite 5-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/nautilus-x-singleton/4-workload/snaps-few-objects.yaml
new file mode 100644
index 000000000..c96cfbe30
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,19 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool with snapshot operations
+split_tasks:
+  sequential:
+  - full_sequential:
+    - rados:
+        clients: [client.0]
+        ops: 4000
+        objects: 50
+        write_append_excl: false
+        op_weights:
+          read: 100
+          write: 100
+          delete: 50
+          snap_create: 50
+          snap_remove: 50
+          rollback: 50
+  - print: "**** done rados/snaps-few-objects 5-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/5-workload/+ b/qa/suites/upgrade/nautilus-x-singleton/5-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/5-workload/+
diff --git a/qa/suites/upgrade/nautilus-x-singleton/5-workload/.qa b/qa/suites/upgrade/nautilus-x-singleton/5-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/5-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/5-workload/radosbench.yaml b/qa/suites/upgrade/nautilus-x-singleton/5-workload/radosbench.yaml
new file mode 100644
index 000000000..2cfbf1dcf
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/5-workload/radosbench.yaml
@@ -0,0 +1,41 @@
+meta:
+- desc: |
+    run randomized correctness test for rados operations
+    generate write load with rados bench
+split_tasks:
+  sequential:
+  - full_sequential:
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+    - radosbench:
+        clients: [client.0]
+        time: 150
+  - print: "**** done radosbench 7-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/5-workload/rbd_api.yaml b/qa/suites/upgrade/nautilus-x-singleton/5-workload/rbd_api.yaml
new file mode 100644
index 000000000..c221617d6
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/5-workload/rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    librbd C and C++ api tests
+split_tasks:
+  sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml b/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml
new file mode 100644
index 000000000..222ba4878
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/6-finish-upgrade.yaml
@@ -0,0 +1,37 @@
+meta:
+- desc: |
+    install upgrade on remaining node
+    restarting remaining osds
+overrides:
+  ceph:
+    log-ignorelist:
+      - overall HEALTH_
+      - \(FS_DEGRADED\)
+      - \(MDS_
+tasks:
+- install.upgrade:
+    mon.c:
+- ceph.restart:
+    daemons: [mon.c, mgr.x, mgr.y]
+    wait-for-up: true
+    wait-for-healthy: false
+- ceph.restart:
+    daemons: [osd.8, osd.9, osd.10, osd.11]
+    wait-for-up: true
+    wait-for-healthy: false
+- ceph.restart:
+    daemons: [mds.a]
+    wait-for-up: true
+    wait-for-healthy: false
+- exec:
+    mon.a:
+      - ceph mon enable-msgr2
+- install.upgrade:
+    client.0:
+
+# reduce canary pool hit set count from 32 -> 4 and do some io so that
+# we are sure they will be trimmed post-upgrade.
+- exec:
+    mon.a:
+      - sudo ceph osd pool set test-hit-set-cache hit_set_count 4
+      - rados -p test-hit-set-cache bench 5 write -b 1
diff --git a/qa/suites/upgrade/nautilus-x-singleton/7-pacific.yaml b/qa/suites/upgrade/nautilus-x-singleton/7-pacific.yaml
new file mode 120000
index 000000000..1467fc88e
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/7-pacific.yaml
@@ -0,0 +1 @@
+.qa/releases/pacific.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/8-workload/+ b/qa/suites/upgrade/nautilus-x-singleton/8-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/8-workload/+
diff --git a/qa/suites/upgrade/nautilus-x-singleton/8-workload/.qa b/qa/suites/upgrade/nautilus-x-singleton/8-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/8-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/8-workload/rbd-python.yaml b/qa/suites/upgrade/nautilus-x-singleton/8-workload/rbd-python.yaml
new file mode 100644
index 000000000..56ba21d7a
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/8-workload/rbd-python.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+    librbd python api tests
+tasks:
+- workunit:
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 9-workload"
diff --git a/qa/suites/upgrade/nautilus-x-singleton/8-workload/snaps-many-objects.yaml b/qa/suites/upgrade/nautilus-x-singleton/8-workload/snaps-many-objects.yaml
new file mode 100644
index 000000000..805bf97c3
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/8-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/upgrade/nautilus-x-singleton/bluestore-bitmap.yaml b/qa/suites/upgrade/nautilus-x-singleton/bluestore-bitmap.yaml
new file mode 120000
index 000000000..a59cf5175
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/bluestore-bitmap.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/mon_election b/qa/suites/upgrade/nautilus-x-singleton/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/thrashosds-health.yaml b/qa/suites/upgrade/nautilus-x-singleton/thrashosds-health.yaml
new file mode 120000
index 000000000..9124eb1aa
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml b/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml
new file mode 120000
index 000000000..cfb85f10e
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x-singleton/ubuntu_18.04.yaml
@@ -0,0 +1 @@
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/.qa b/qa/suites/upgrade/nautilus-x/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/% b/qa/suites/upgrade/nautilus-x/parallel/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/%
diff --git a/qa/suites/upgrade/nautilus-x/parallel/.qa b/qa/suites/upgrade/nautilus-x/parallel/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/0-cluster/+ b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/+
diff --git a/qa/suites/upgrade/nautilus-x/parallel/0-cluster/.qa b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/0-cluster/openstack.yaml b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/openstack.yaml
new file mode 100644
index 000000000..f4d1349b4
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml
new file mode 100644
index 000000000..71a64bbc1
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/0-cluster/start.yaml
@@ -0,0 +1,46 @@
+meta:
+- desc: |
+    Run ceph on two nodes,
+    with a separate client 0,1,2 third node.
+    Use xfs beneath the osds.
+    CephFS tests running on client 2,3
+roles:
+- - mon.a
+  - mgr.x
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+- - mon.b
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+- - mon.c
+  - osd.8
+  - osd.9
+  - osd.10
+  - osd.11
+- - client.0
+  - client.1
+  - client.2
+  - client.3
+overrides:
+  ceph:
+    log-ignorelist:
+      - scrub mismatch
+      - ScrubResult
+      - wrongly marked
+      - \(POOL_APP_NOT_ENABLED\)
+      - \(SLOW_OPS\)
+      - overall HEALTH_
+      - slow request
+    conf:
+      global:
+        enable experimental unrecoverable data corrupting features: "*"
+      mon:
+        mon warn on osd down out interval zero: false
+      osd:
+        osd class load list: "*"
+        osd class default list: "*"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/.qa b/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml b/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml
new file mode 100644
index 000000000..ce82cdbf0
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/1-ceph-install/nautilus.yaml
@@ -0,0 +1,55 @@
+meta:
+- desc: |
+    install ceph/nautilus latest
+    run workload and upgrade-sequence in parallel
+    upgrade the client node
+tasks:
+- install:
+    exclude_packages:
+      - ceph-mgr-cephadm
+      - cephadm
+      - libcephfs-dev
+    branch: nautilus
+- print: "**** done installing nautilus"
+- ceph:
+    log-ignorelist:
+      - overall HEALTH_
+      - \(FS_
+      - \(MDS_
+      - \(OSD_
+      - \(MON_DOWN\)
+      - \(CACHE_POOL_
+      - \(POOL_
+      - \(MGR_DOWN\)
+      - \(PG_
+      - \(SMALLER_PGP_NUM\)
+      - Monitor daemon marked osd
+      - Behind on trimming
+      - Manager daemon
+      - Not found or unloadable
+      - evicting unresponsive client
+    conf:
+      global:
+        mon warn on pool no app: false
+        bluestore_warn_on_legacy_statfs: false
+        bluestore warn on no per pool omap: false
+        mon pg warn min per osd: 0
+- exec:
+    osd.0:
+      - ceph osd set-require-min-compat-client nautilus
+- print: "**** done ceph"
+- install.upgrade:
+    mon.a:
+    mon.b:
+    mon.c:
+- print: "**** done install.upgrade non-client hosts"
+- rgw:
+  - client.1
+- print: "**** done => started rgw client.1"
+- parallel:
+  - workload
+  - upgrade-sequence
+- print: "**** done parallel"
+- install.upgrade:
+    client.0:
+- print: "**** done install.upgrade on client.0"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml
@@ -0,0 +1 @@
+
diff --git a/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml
new file mode 100644
index 000000000..e31e37ba6
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd min pg log entries: 1
+        osd max pg log entries: 2
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/+ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/+
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/.qa b/qa/suites/upgrade/nautilus-x/parallel/2-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/blogbench.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/blogbench.yaml
new file mode 100644
index 000000000..021fcc681
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/blogbench.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+    run a cephfs stress test
+    mount ceph-fuse on client.2 before running workunit
+workload:
+  full_sequential:
+  - sequential:
+    - ceph-fuse:
+    - print: "**** done ceph-fuse 2-workload"
+    - workunit:
+        clients:
+          client.2:
+            - suites/blogbench.sh
+    - print: "**** done suites/blogbench.sh 2-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/ec-rados-default.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/ec-rados-default.yaml
new file mode 100644
index 000000000..5c5a95880
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/ec-rados-default.yaml
@@ -0,0 +1,24 @@
+meta:
+- desc: |
+    run randomized correctness test for rados operations
+    on an erasure-coded pool
+workload:
+  full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/rados_api.yaml
new file mode 100644
index 000000000..3a5c03b1c
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/rados_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    object class functional tests
+workload:
+  full_sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - cls
+  - print: "**** done cls 2-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/rados_loadgenbig.yaml
new file mode 100644
index 000000000..7258c4755
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/rados_loadgenbig.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    generate read/write load with rados objects ranging from 1MB to 25MB
+workload:
+  full_sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - rados/load-gen-big.sh
+  - print: "**** done rados/load-gen-big.sh 2-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/rgw_ragweed_prepare.yaml
new file mode 100644
index 000000000..5dbffc73d
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/rgw_ragweed_prepare.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+    rgw ragweed prepare
+workload:
+  full_sequential:
+  - sequential:
+    - ragweed:
+        client.1:
+          default-branch: ceph-pacific
+          rgw_server: client.1
+          stages: prepare
+    - print: "**** done rgw ragweed prepare 2-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/test_rbd_api.yaml
new file mode 100644
index 000000000..4e232bbcd
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/test_rbd_api.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    librbd C and C++ api tests
+workload:
+  full_sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - rbd/test_librbd.sh
+  - print: "**** done rbd/test_librbd.sh 2-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/nautilus-x/parallel/2-workload/test_rbd_python.yaml
new file mode 100644
index 000000000..9ac09aed5
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/2-workload/test_rbd_python.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    librbd python api tests
+workload:
+  full_sequential:
+  - workunit:
+      branch: nautilus
+      clients:
+        client.0:
+          - rbd/test_librbd_python.sh
+  - print: "**** done rbd/test_librbd_python.sh 2-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/.qa b/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/upgrade-all.yaml
new file mode 100644
index 000000000..e8349b690
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/upgrade-all.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+    upgrade the ceph cluster
+upgrade-sequence:
+  sequential:
+  - ceph.restart:
+      daemons: [mon.a, mon.b, mon.c, mgr.x]
+      mon-health-to-clog: false
+      wait-for-healthy: false
+  - exec:
+      mon.a:
+        - ceph config set global mon_warn_on_msgr2_not_enabled false
+  - ceph.healthy:
+  - ceph.restart:
+      daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, osd.10, osd.11]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - ceph.restart:
+      daemons: [mds.a, rgw.*]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - print: "**** done ceph.restart all"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
new file mode 100644
index 000000000..3c45d7658
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml
@@ -0,0 +1,55 @@
+meta:
+- desc: |
+    upgrade the ceph cluster,
+    upgrade in two steps
+    step one ordering: mon.a, osd.0, osd.1, mds.a
+    step two ordering: mon.b, mon.c, osd.2, osd.3
+    step three ordering: client.1
+    ceph expected to be in healthy state after each step
+upgrade-sequence:
+  sequential:
+  - ceph.restart:
+      daemons: [mgr.x, mon.a]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.b]
+      wait-for-healthy: true
+      mon-health-to-clog: false
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [mon.c]
+      wait-for-healthy: false
+      mon-health-to-clog: false
+  - ceph.healthy:
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.0, osd.1, osd.2, osd.3]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart: [mds.a]
+  - sleep:
+      duration: 60
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.4, osd.5, osd.6, osd.7]
+      wait-for-healthy: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [osd.8, osd.9, osd.10, osd.11]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - sleep:
+      duration: 60
+  - ceph.restart:
+      daemons: [rgw.*]
+      wait-for-healthy: false
+      wait-for-osds-up: true
+  - sleep:
+      duration: 60
diff --git a/qa/suites/upgrade/nautilus-x/parallel/4-pacific.yaml b/qa/suites/upgrade/nautilus-x/parallel/4-pacific.yaml
new file mode 120000
index 000000000..1467fc88e
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/4-pacific.yaml
@@ -0,0 +1 @@
+.qa/releases/pacific.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/+ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/+
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/.qa b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/blogbench.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/blogbench.yaml
new file mode 100644
index 000000000..205f72e83
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/blogbench.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+    run a cephfs stress test
+    mount ceph-fuse on client.3 before running workunit
+tasks:
+- sequential:
+  - ceph-fuse:
+  - print: "**** done ceph-fuse 4-final-workload"
+  - workunit:
+      clients:
+        client.3:
+          - suites/blogbench.sh
+  - print: "**** done suites/blogbench.sh 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
new file mode 100644
index 000000000..d8b3dcb38
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados-snaps-few-objects.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool with snapshots
+tasks:
+  - rados:
+      clients: [client.1]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+  - print: "**** done rados 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_loadgenmix.yaml
new file mode 100644
index 000000000..922a9da4f
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_loadgenmix.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+    generate read/write load with rados objects ranging from 1 byte to 1MB
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rados/load-gen-mix.sh
+  - print: "**** done rados/load-gen-mix.sh 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml
new file mode 100644
index 000000000..08706dfc1
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rados_mon_thrash.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+    librados C and C++ api tests
+overrides:
+  ceph:
+    log-ignorelist:
+      - reached quota
+      - \(REQUEST_SLOW\)
+tasks:
+  - mon_thrash:
+      revive_delay: 20
+      thrash_delay: 1
+  - print: "**** done mon_thrash 4-final-workload"
+  - workunit:
+      clients:
+        client.1:
+          - rados/test.sh
+  - print: "**** done rados/test.sh 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rbd_cls.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rbd_cls.yaml
new file mode 100644
index 000000000..aaf0a3779
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rbd_cls.yaml
@@ -0,0 +1,9 @@
+meta:
+- desc: |
+    rbd object class functional tests
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - cls/test_cls_rbd.sh
+  - print: "**** done cls/test_cls_rbd.sh 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rbd_import_export.yaml
new file mode 100644
index 000000000..46e135506
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rbd_import_export.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    run basic import/export cli tests for rbd
+tasks:
+  - workunit:
+      clients:
+        client.1:
+          - rbd/import_export.sh
+      env:
+        RBD_CREATE_ARGS: --new-format
+  - print: "**** done rbd/import_export.sh 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rgw.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rgw.yaml
new file mode 100644
index 000000000..de9599472
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rgw.yaml
@@ -0,0 +1,7 @@
+overrides:
+  rgw:
+    frontend: civetweb
+tasks:
+  - sequential:
+    - rgw-final-workload
+    - print: "**** done rgw 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rgw_ragweed_check.yaml b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rgw_ragweed_check.yaml
new file mode 100644
index 000000000..2e94f2503
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/5-final-workload/rgw_ragweed_check.yaml
@@ -0,0 +1,11 @@
+meta:
+- desc: |
+    ragweed check for rgw
+rgw-final-workload:
+  full_sequential:
+  - ragweed:
+      client.1:
+        default-branch: ceph-pacific
+        rgw_server: client.1
+        stages: check
+  - print: "**** done ragweed check 4-final-workload"
diff --git a/qa/suites/upgrade/nautilus-x/parallel/mon_election b/qa/suites/upgrade/nautilus-x/parallel/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/objectstore b/qa/suites/upgrade/nautilus-x/parallel/objectstore
new file mode 120000
index 000000000..016cbf967
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml b/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml
new file mode 120000
index 000000000..cfb85f10e
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/parallel/ubuntu_18.04.yaml
@@ -0,0 +1 @@
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/% b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/%
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/.qa b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/0-cluster b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/0-cluster
new file mode 120000
index 000000000..358093728
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/0-cluster
@@ -0,0 +1 @@
+../stress-split/0-cluster/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1-nautilus-install b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1-nautilus-install
new file mode 120000
index 000000000..0479ac542
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1-nautilus-install
@@ -0,0 +1 @@
+../stress-split/1-ceph-install/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml
@@ -0,0 +1 @@
+
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml
new file mode 100644
index 000000000..e31e37ba6
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd min pg log entries: 1
+        osd max pg log entries: 2
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/2-partial-upgrade b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/2-partial-upgrade
new file mode 120000
index 000000000..ab35fc1a5
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/2-partial-upgrade
@@ -0,0 +1 @@
+../stress-split/2-partial-upgrade/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/.qa b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml
new file mode 100644
index 000000000..3290918ac
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3-thrash/default.yaml
@@ -0,0 +1,27 @@
+meta:
+- desc: |
+    randomly kill and revive osd
+    small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-ignorelist:
+    - but it is still running
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- parallel:
+  - stress-tasks
+stress-tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    min_in: 4
+    chance_thrash_cluster_full: 0
+    chance_thrash_pg_upmap: 0
+    chance_thrash_pg_upmap_items: 0
+    chance_force_recovery: 0
+    aggressive_pg_num_changes: false
+    disable_objectstore_tool_tests: true
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3.1-objectstore b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3.1-objectstore
new file mode 120000
index 000000000..016cbf967
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/3.1-objectstore
@@ -0,0 +1 @@
+../stress-split/objectstore/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/% b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/%
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/.qa b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/rados-ec-workload.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/rados-ec-workload.yaml
new file mode 100644
index 000000000..c89551e6b
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/rados-ec-workload.yaml
@@ -0,0 +1,22 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on an erasure coded pool
+stress-tasks:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      ec_pool: true
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 0
+        append: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+        copy_from: 50
+        setattr: 25
+        rmattr: 25
+  - print: "**** done rados ec task"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/rbd-ec-workload.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/rbd-ec-workload.yaml
new file mode 100644
index 000000000..d0e661dca
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/4-ec-workload/rbd-ec-workload.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+    run rbd tests on EC pool
+    overrides => force bluestore since it's required for ec-overwrite
+    use an EC pool for rbd and run xfstests on top of it to verify correctness
+tasks:
+- exec:
+    client.0:
+      - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+      - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+      - sudo ceph osd pool set datapool allow_ec_overwrites true
+      - rbd pool init datapool
+- qemu:
+    all:
+      clone: true
+      type: block
+      disks: 3
+      test: qa/run_xfstests_qemu.sh
+- print: "**** done rbd/qemu ec task"
+exclude_arch: armv7l
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      client:
+        rbd default data pool: datapool
+      osd: # force bluestore since it's required for ec overwrites
+        osd objectstore: bluestore
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/5-finish-upgrade.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/5-finish-upgrade.yaml
new file mode 120000
index 000000000..a66a7dc18
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/5-finish-upgrade.yaml
@@ -0,0 +1 @@
+../stress-split/5-finish-upgrade.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/6-pacific.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/6-pacific.yaml
new file mode 120000
index 000000000..1467fc88e
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/6-pacific.yaml
@@ -0,0 +1 @@
+.qa/releases/pacific.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/7-final-workload.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/7-final-workload.yaml
new file mode 100644
index 000000000..50a146507
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/7-final-workload.yaml
@@ -0,0 +1,35 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery become
+# necessary.
+#
+meta:
+- desc: |
+    randomized correctness test for rados operations on an erasure coded pool
+    using the jerasure plugin with k=3 and m=1
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 50
+    ec_pool: true
+    write_append_excl: false
+    erasure_code_profile:
+      name: jerasure31profile
+      plugin: jerasure
+      k: 3
+      m: 1
+      technique: reed_sol_van
+      crush-failure-domain: osd
+    op_weights:
+      read: 100
+      write: 0
+      append: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
+      copy_from: 50
+      setattr: 25
+      rmattr: 25
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/mon_election b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/thrashosds-health.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/thrashosds-health.yaml
new file mode 120000
index 000000000..9124eb1aa
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/thrashosds-health.yaml
@@ -0,0 +1 @@
+.qa/tasks/thrashosds-health.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml
new file mode 120000
index 000000000..cfb85f10e
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split-erasure-code/ubuntu_18.04.yaml
@@ -0,0 +1 @@
+.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/% b/qa/suites/upgrade/nautilus-x/stress-split/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/%
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/.qa b/qa/suites/upgrade/nautilus-x/stress-split/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/+ b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/+
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/.qa b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/openstack.yaml
new file mode 100644
index 000000000..5caffc353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 4
+      size: 30 # GB
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml
new file mode 100644
index 000000000..5d61bb7d9
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/0-cluster/start.yaml
@@ -0,0 +1,40 @@
+meta:
+- desc: |
+    Run ceph on two nodes,
+    with a separate client-only node.
+    Use xfs beneath the osds.
+overrides:
+  ceph:
+    mon_bind_msgr2: false
+    mon_bind_addrvec: false
+    fs: xfs
+    log-ignorelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+      - \(MGR_DOWN\)
+      - slow request
+      - \(MON_MSGR2_NOT_ENABLED\)
+    conf:
+      global:
+        enable experimental unrecoverable data corrupting features: "*"
+        mon warn on msgr2 not enabled: false
+      mon:
+        mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+- - mon.b
+  - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+- - mon.c
+- - osd.8
+  - osd.9
+  - osd.10
+  - osd.11
+- - client.0
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/.qa b/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml b/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml
new file mode 100644
index 000000000..7f97631df
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/1-ceph-install/nautilus.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: install ceph/nautilus latest
+tasks:
+- install:
+    exclude_packages:
+      - ceph-mgr-cephadm
+      - cephadm
+      - libcephfs-dev
+    branch: nautilus
+- print: "**** done install nautilus"
+- ceph:
+    conf:
+      global:
+        bluestore_warn_on_legacy_statfs: false
+        bluestore warn on no per pool omap: false
+        mon pg warn min per osd: 0
+    log-ignorelist:
+      - Not found or unloadable
+      - evicting unresponsive client
+- exec:
+    osd.0:
+      - ceph osd require-osd-release nautilus
+      - ceph osd set-require-min-compat-client nautilus
+- print: "**** done ceph"
+- rgw:
+  - client.0
+- print: "**** done => started rgw client.0"
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on osd down out interval zero: false
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml
@@ -0,0 +1 @@
+
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml
new file mode 100644
index 000000000..e31e37ba6
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd min pg log entries: 1
+        osd max pg log entries: 2
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/2-partial-upgrade/.qa b/qa/suites/upgrade/nautilus-x/stress-split/2-partial-upgrade/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/2-partial-upgrade/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/nautilus-x/stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 000000000..58ff5ac66
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,14 @@
+meta:
+- desc: |
+    install upgrade ceph/-x on 2/3 of cluster
+    restart : mons, osd.0-7
+tasks:
+- install.upgrade:
+    mon.a:
+    mon.b:
+    mon.c:
+- print: "**** done install.upgrade of first 3 nodes"
+- ceph.restart:
+    daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7]
+    mon-health-to-clog: false
+- print: "**** done ceph.restart of all mons and 2/3 of osds"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/.qa b/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml
new file mode 100644
index 000000000..2be9c1f29
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/3-thrash/default.yaml
@@ -0,0 +1,26 @@
+meta:
+- desc: |
+    randomly kill and revive osd
+    small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-ignorelist:
+    - but it is still running
+    - wrongly marked me down
+    - objects unfound and apparently lost
+    - log bound mismatch
+tasks:
+- parallel:
+  - stress-tasks
+stress-tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_thrash_cluster_full: 0
+    chance_thrash_pg_upmap: 0
+    chance_thrash_pg_upmap_items: 0
+    disable_objectstore_tool_tests: true
+    chance_force_recovery: 0
+    aggressive_pg_num_changes: false
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/+ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/+
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/.qa b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/radosbench.yaml
new file mode 100644
index 000000000..115939e6c
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,52 @@
+meta:
+- desc: |
+    run randomized correctness test for rados operations
+    generate write load with rados bench
+stress-tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+- print: "**** done radosbench 7-workload"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 000000000..0a1fa6306
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+    run basic cls tests for rbd
+stress-tasks:
+- workunit:
+    branch: nautilus
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+- print: "**** done cls/test_cls_rbd.sh 5-workload"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 000000000..703f4f7cc
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+    run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+    branch: nautilus
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 5-workload"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 000000000..c6a4de97f
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+    librbd C and C++ api tests
+stress-tasks:
+- workunit:
+    branch: nautilus
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 7-workload"
diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/readwrite.yaml
new file mode 100644
index 000000000..41e34d6d7
--- /dev/null
+++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool,
+    using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 500
+      write_append_excl: false
+      op_weights:
+        read: 45
+        write: 45
+        delete: 10
+- print: "**** done rados/readwrite 5-workload"
a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rgw_ragweed_prepare.yaml new file mode 100644 index 000000000..8adde7ef5 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/rgw_ragweed_prepare.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + rgw ragweed prepare before upgrade +stress-tasks: + - full_sequential: + - sequential: + - ragweed: + client.0: + default-branch: ceph-nautilus + rgw_server: client.0 + stages: prepare + - print: "**** done rgw ragweed prepare 4-workload" diff --git a/qa/suites/upgrade/nautilus-x/stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/snaps-few-objects.yaml new file mode 100644 index 000000000..f56d0de0f --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/4-workload/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/nautilus-x/stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/nautilus-x/stress-split/5-finish-upgrade.yaml new file mode 100644 index 000000000..44c78c0f5 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/5-finish-upgrade.yaml @@ -0,0 +1,21 @@ +tasks: +- install.upgrade: + osd.8: + client.0: + extra_packages: + - python3-rados + - python3-rgw + - python3-rbd + - python3-cephfs +- ceph.restart: + daemons: [mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*] + wait-for-healthy: false + wait-for-osds-up: true +- print: "**** restarted/upgraded => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*" +- exec: + osd.0: + - ceph osd set pglog_hardlimit + - ceph osd dump --format=json-pretty | grep "flags" + - ceph config set global mon_warn_on_msgr2_not_enabled false +- print: "**** try to set pglog_hardlimit again, should succeed" + diff --git a/qa/suites/upgrade/nautilus-x/stress-split/6-pacific.yaml b/qa/suites/upgrade/nautilus-x/stress-split/6-pacific.yaml new file mode 120000 index 000000000..1467fc88e --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/6-pacific.yaml @@ -0,0 +1 @@ +.qa/releases/pacific.yaml
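The 5-finish-upgrade step above sets the pglog_hardlimit flag and then greps the osdmap flags for visibility. If a job wanted to fail outright when the flag did not stick, one hedged possibility (a hypothetical addition reusing only the exec task already shown) would be:

    - exec:
        osd.0:
          - ceph osd dump --format=json-pretty | grep pglog_hardlimit   # grep's non-zero exit fails the task if the flag is absent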
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/7-msgr2.yaml b/qa/suites/upgrade/nautilus-x/stress-split/7-msgr2.yaml new file mode 100644 index 000000000..f56c8be08 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/7-msgr2.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + mon.a: + - ceph mon enable-msgr2 + - ceph config rm global mon_warn_on_msgr2_not_enabled +- ceph.healthy: diff --git a/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/+ b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/+ diff --git a/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/.qa b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/.qa @@ -0,0 +1 @@ +../.qa/
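7-msgr2.yaml above flips the monitors to msgr2 and removes the warning suppression set earlier. After enable-msgr2 each mon should advertise a v2 address, so a hedged follow-up check (not part of the suite, same exec style) could be:

    - exec:
        mon.a:
          - ceph mon dump | grep v2:   # each mon's addrvec should now include a v2: endpoint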
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/rbd-python.yaml b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/rbd-python.yaml new file mode 100644 index 000000000..42cc6c663 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/rbd-python.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + librbd python api tests +overrides: + ceph: + conf: + client: + rbd default clone format: 1 +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh 9-workload" diff --git a/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/snaps-many-objects.yaml new file mode 100644 index 000000000..805bf97c3 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/8-final-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/nautilus-x/stress-split/mon_election b/qa/suites/upgrade/nautilus-x/stress-split/mon_election new file mode 120000 index 000000000..3f331e621 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/mon_election @@ -0,0 +1 @@ +.qa/mon_election
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/objectstore/.qa b/qa/suites/upgrade/nautilus-x/stress-split/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/objectstore/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/nautilus-x/stress-split/objectstore/bluestore-bitmap.yaml new file mode 120000 index 000000000..a59cf5175 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/objectstore/filestore-xfs.yaml b/qa/suites/upgrade/nautilus-x/stress-split/objectstore/filestore-xfs.yaml new file mode 120000 index 000000000..41f2a9d14 --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/objectstore/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore/filestore-xfs.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/thrashosds-health.yaml b/qa/suites/upgrade/nautilus-x/stress-split/thrashosds-health.yaml new file mode 120000 index 000000000..9124eb1aa --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml b/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml new file mode 120000 index 000000000..cfb85f10e --- /dev/null +++ b/qa/suites/upgrade/nautilus-x/stress-split/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/.qa b/qa/suites/upgrade/octopus-x/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/.qa @@ -0,0 +1 @@ +../.qa
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/% b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/% diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/.qa @@ -0,0 +1 @@ +../.qa
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/+ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/+ diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/openstack.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/openstack.yaml new file mode 100644 index 000000000..f4d1349b4 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/start.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/start.yaml new file mode 100644 index 000000000..1a397621c --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/0-cluster/start.yaml @@ -0,0 +1,46 @@ +meta: +- desc: | + Run ceph on three nodes, + with a separate fourth node for clients 0-3. + Use xfs beneath the osds. + CephFS tests run on clients 2,3 +roles: +- - mon.a + - mgr.x + #- mds.a ref: https://tracker.ceph.com/issues/48994 + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - mon.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - mon.c + - osd.8 + - osd.9 + - osd.10 + - osd.11 +- - client.0 + - client.1 + - client.2 + - client.3 +overrides: + ceph: + log-whitelist: + - scrub mismatch + - ScrubResult + - wrongly marked + - \(POOL_APP_NOT_ENABLED\) + - \(SLOW_OPS\) + - overall HEALTH_ + - slow request + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon: + mon warn on osd down out interval zero: false + osd: + osd class load list: "*" + osd class default list: "*" diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1-ceph-install/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1-ceph-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1-ceph-install/octopus.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1-ceph-install/octopus.yaml new file mode 100644 index 000000000..17303380a --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1-ceph-install/octopus.yaml @@ -0,0 +1,53 @@ +meta: +- desc: | + install ceph/octopus latest + run workload and upgrade-sequence in parallel + upgrade the client node +tasks: +- install: + exclude_packages: + - ceph-mgr-cephadm + - cephadm + - libcephfs-dev + branch: octopus +- print: "**** done installing octopus" +- ceph: + log-ignorelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + conf: + global: + mon warn on pool no app: false + bluestore_warn_on_legacy_statfs: false + bluestore warn on no per pool omap: false + mon pg warn min per osd: 0 +- exec: + osd.0: + - ceph osd set-require-min-compat-client octopus +- print: "**** done ceph" +- install.upgrade: + mon.a: + mon.b: + mon.c: +- print: "**** done install.upgrade non-client hosts" +- rgw: + - client.1 +- print: "**** done => started rgw client.1" +- parallel: + - workload + - upgrade-sequence +- print: "**** done parallel" +- install.upgrade: + client.0: +- print: "**** done install.upgrade on client.0" diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/.qa @@ -0,0 +1 @@ +../.qa/
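octopus.yaml above leans on a teuthology convention: top-level keys other than tasks (here workload and upgrade-sequence) name task lists that the parallel task runs side by side, and the sibling 2-workload and 3-upgrade-sequence fragments merge their entries into those lists. Stripped to a skeleton, the composed job looks roughly like this (placeholder steps only):

    tasks:
    - parallel:
      - workload           # populated by 2-workload/*.yaml
      - upgrade-sequence   # populated by 3-upgrade-sequence/*.yaml
    workload:
      full_sequential:
      - print: "placeholder workload step"
    upgrade-sequence:
      sequential:
      - print: "placeholder upgrade step"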
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 000000000..e31e37ba6 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/+ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/+ diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/rgw_ragweed_prepare.yaml new file mode 100644 index 000000000..4735b8be5 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/rgw_ragweed_prepare.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + rgw ragweed prepare +workload: + full_sequential: + - sequential: + - ragweed: + client.1: + default-branch: ceph-octopus + rgw_server: client.1 + stages: prepare + - print: "**** done rgw ragweed prepare 2-workload" diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/test_rbd_python.disable b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/test_rbd_python.disable new file mode 100644 index 000000000..2ab3c18cd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/2-workload/test_rbd_python.disable @@ -0,0 +1,12 @@ +meta: +- desc: | + librbd python api tests +#this test disable ref: https://tracker.ceph.com/issues/48759 +workload: + full_sequential: + - workunit: + branch: octopus + clients: + client.0: + - rbd/test_librbd_python.sh + - print: "**** done rbd/test_librbd_python.sh 2-workload " diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 000000000..b033e23d6 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,29 @@ +meta: +- desc: | + upgrade the ceph cluster +upgrade-sequence: + sequential: + - ceph.restart: + daemons: [mon.a, mon.b, mon.c, mgr.x] + mon-health-to-clog: false + wait-for-healthy: false + - sleep: + duration: 60 + - exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false + - ceph.healthy: + - ceph.restart: + daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, osd.10, osd.11] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + #daemons: [mds.a, rgw.*] this needs to be added when mds is enabled in roles + daemons: [rgw.*] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - print: "**** done ceph.restart all" diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 000000000..9f1d20822 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,58 @@ +meta: +- desc: | + upgrade the ceph cluster, + upgrade in three steps + step one ordering: mgr.x, mon.a, then mon.b, then mon.c + step two ordering: osd.0-3, then osd.4-7, then osd.8-11 + step three ordering: rgw.* + ceph expected to be in a healthy state after each step +upgrade-sequence: + sequential: + - ceph.restart: + daemons: [mgr.x, mon.a] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b] + wait-for-healthy: true + mon-health-to-clog: false + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.c] + wait-for-healthy: false + mon-health-to-clog: false + - ceph.healthy: + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.0, osd.1, osd.2, osd.3] + wait-for-healthy: true + - sleep: + duration: 60 + #this needs to be added when mds is enabled in roles + #- ceph.restart: [mds.a] + #- sleep: + # duration: 60 + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.4, osd.5, osd.6, osd.7] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.8, osd.9, osd.10, osd.11] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [rgw.*] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 + - print: "**** done upgrade-mon-osd-mds.yaml" + diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/4-pacific.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/4-pacific.yaml new file mode 120000 index 000000000..1467fc88e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/4-pacific.yaml @@ -0,0 +1 @@ +.qa/releases/pacific.yaml
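A note on the restart knobs used throughout the sequences above, since their interplay is easy to misread: wait-for-healthy blocks until the cluster reports healthy, wait-for-osds-up only waits for the restarted OSDs to rejoin, and mon-health-to-clog: false quiets mon health messages in the cluster log during the bounce. An annotated sketch with hypothetical values:

    - ceph.restart:
        daemons: [mon.a]          # restart only the listed daemons
        wait-for-healthy: false   # don't block on HEALTH_OK; quorum may be degraded mid-upgrade
        wait-for-osds-up: true    # do wait until restarted OSDs report up again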
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/+ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/+ diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/.qa b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/rgw.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/rgw.yaml new file mode 100644 index 000000000..de9599472 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/rgw.yaml @@ -0,0 +1,7 @@ +overrides: + rgw: + frontend: civetweb +tasks: + - sequential: + - rgw-final-workload + - print: "**** done rgw 4-final-workload" diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/rgw_ragweed_check.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/rgw_ragweed_check.yaml new file mode 100644 index 000000000..2e94f2503 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/5-final-workload/rgw_ragweed_check.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + ragweed check for rgw +rgw-final-workload: + full_sequential: + - ragweed: + client.1: + default-branch: ceph-pacific + rgw_server: client.1 + stages: check + - print: "**** done ragweed check 4-final-workload" diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/centos_latest.yaml b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/centos_latest.yaml new file mode 120000 index 000000000..bd9854e70 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel-no-cephadm/ubuntu_latest.disable b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/ubuntu_latest.disable new file mode 120000 index 000000000..3a09f9abb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel-no-cephadm/ubuntu_latest.disable @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel/% b/qa/suites/upgrade/octopus-x/parallel/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/% diff --git a/qa/suites/upgrade/octopus-x/parallel/.qa b/qa/suites/upgrade/octopus-x/parallel/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/.qa @@ -0,0 +1 @@ +../.qa
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel/0-distro b/qa/suites/upgrade/octopus-x/parallel/0-distro new file mode 120000 index 000000000..fb247fda1 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/0-distro @@ -0,0 +1 @@ +../../../rados/cephadm/smoke/distro/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel/0-start.yaml b/qa/suites/upgrade/octopus-x/parallel/0-start.yaml new file mode 100644 index 000000000..ad3ee43d3 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/0-start.yaml @@ -0,0 +1,31 @@ +roles: +- - mon.a + - mon.c + - mgr.y + - osd.0 + - osd.1 + - osd.2 + - osd.3 + - client.0 + - node-exporter.a + - alertmanager.a +- - mon.b + - mgr.x + - osd.4 + - osd.5 + - osd.6 + - osd.7 + - client.1 + - prometheus.a + - grafana.a + - node-exporter.b +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + create_rbd_pool: true + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/suites/upgrade/octopus-x/parallel/1-tasks.yaml b/qa/suites/upgrade/octopus-x/parallel/1-tasks.yaml new file mode 100644 index 000000000..3c298bc03 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/1-tasks.yaml @@ -0,0 +1,39 @@ +tasks: +- install: + branch: octopus +- print: "**** done install task..." +- print: "**** done start installing octopus cephadm ..." +- cephadm: + image: quay.ceph.io/ceph-ci/ceph:octopus + cephadm_branch: octopus + cephadm_git_url: https://github.com/ceph/ceph + conf: + osd: + #set config option for which cls modules are allowed to be loaded / used + osd_class_load_list: "*" + osd_class_default_list: "*" + # deploy additional mons the "old" (octopus) way + add_mons_via_daemon_add: true + avoid_pacific_features: true +- print: "**** done end installing octopus cephadm ..." + +- cephadm.shell: + mon.a: + - ceph fs volume create foo +- ceph.healthy: + +- print: "**** done creating new fs" + +- cephadm.shell: + mon.a: + - ceph config set mgr mgr/cephadm/use_repo_digest true --force + - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force + - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force + +- print: "**** done cephadm.shell ceph config set mgr..." + +- print: "**** done start parallel" +- parallel: + - workload + - upgrade-sequence +- print: "**** done end parallel" diff --git a/qa/suites/upgrade/octopus-x/parallel/mon_election b/qa/suites/upgrade/octopus-x/parallel/mon_election new file mode 120000 index 000000000..3f331e621 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/mon_election @@ -0,0 +1 @@ +.qa/mon_election
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel/upgrade-sequence.yaml b/qa/suites/upgrade/octopus-x/parallel/upgrade-sequence.yaml new file mode 100644 index 000000000..cb8cba707 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/upgrade-sequence.yaml @@ -0,0 +1,15 @@ +# renamed tasks: to upgrade-sequence: +upgrade-sequence: + sequential: + - print: "**** done start upgrade, wait" + - cephadm.shell: + env: [sha1] + mon.a: + - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 + - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done + - ceph orch ps + - ceph versions + - ceph versions | jq -e '.overall | length == 1' + - ceph versions | jq -e '.overall | keys' | grep $sha1 + - print: "**** done end upgrade, wait..." + diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/+ b/qa/suites/upgrade/octopus-x/parallel/workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/+ diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/.qa b/qa/suites/upgrade/octopus-x/parallel/workload/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/.qa @@ -0,0 +1 @@ +../.qa
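The upgrade-sequence above polls ceph orch upgrade status with jq until in_progress goes false, then asserts via ceph versions that everything converged on one build. As written the while loop has no upper bound, so a stalled upgrade would spin forever; a hedged variant with a crude cap (hypothetical, same cephadm.shell task) might read:

    - cephadm.shell:
        env: [sha1]
        mon.a:
          - for i in $(seq 1 120) ; do ceph orch upgrade status | jq '.in_progress' | grep -q false && break ; sleep 30 ; done
          - ceph versions | jq -e '.overall | length == 1'   # still fails the job if versions never converged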
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/ec-rados-default.yaml b/qa/suites/upgrade/octopus-x/parallel/workload/ec-rados-default.yaml new file mode 100644 index 000000000..67a0f39c5 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/ec-rados-default.yaml @@ -0,0 +1,25 @@ +meta: +- desc: | + run randomized correctness test for rados operations + on an erasure-coded pool +workload: + full_sequential: + - print: "**** done start ec-rados-default.yaml" + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done end ec-rados-default.yaml" diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/rados_api.yaml b/qa/suites/upgrade/octopus-x/parallel/workload/rados_api.yaml new file mode 100644 index 000000000..f18e7d98d --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/rados_api.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + object class functional tests +workload: + full_sequential: + - print: "**** done start rados_api.yaml" + - workunit: + branch: octopus + clients: + client.0: + - cls + - print: "**** done end rados_api.yaml" diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/rados_loadgenbig.yaml b/qa/suites/upgrade/octopus-x/parallel/workload/rados_loadgenbig.yaml new file mode 100644 index 000000000..6b93c83d7 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/rados_loadgenbig.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + generate read/write load with rados objects ranging from 1MB to 25MB +workload: + full_sequential: + - print: "**** done start rados_loadgenbig.yaml" + - workunit: + branch: octopus + clients: + client.0: + - rados/load-gen-big.sh + - print: "**** done end rados_loadgenbig.yaml" diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml b/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml new file mode 100644 index 000000000..25cd3c756 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/rbd_import_export.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +workload: + full_sequential: + - print: "**** done start rbd_import_export.yaml" + - workunit: + branch: octopus + clients: + client.1: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - print: "**** done end rbd_import_export.yaml" diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/test_rbd_api.yaml b/qa/suites/upgrade/octopus-x/parallel/workload/test_rbd_api.yaml new file mode 100644 index 000000000..c85d4783c --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/test_rbd_api.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + librbd C and C++ api tests +workload: + full_sequential: + - print: "**** done start test_rbd_api.yaml" + - workunit: + branch: octopus + clients: + client.0: + - rbd/test_librbd.sh + - print: "**** done end test_rbd_api.yaml" diff --git a/qa/suites/upgrade/octopus-x/parallel/workload/test_rbd_python.disable b/qa/suites/upgrade/octopus-x/parallel/workload/test_rbd_python.disable new file mode 100644 index 000000000..ac0b4cfcf --- /dev/null +++ b/qa/suites/upgrade/octopus-x/parallel/workload/test_rbd_python.disable @@ -0,0 +1,18 @@ + + +##### This is disabled due to https://tracker.ceph.com/issues/48759 + + +meta: +- desc: | + librbd python api tests +workload: + 
full_sequential: + - print: "**** done start test_rbd_python.yaml" + - workunit: + branch: octopus + clients: + client.0: + - rbd/test_librbd_python.sh + - print: "**** done end test_rbd_python.yaml" + diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/% b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/% diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/.qa @@ -0,0 +1 @@ +../.qa
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/0-cluster b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/0-cluster new file mode 120000 index 000000000..c7da783ed --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/0-cluster @@ -0,0 +1 @@ +../stress-split-no-cephadm/0-cluster
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1-octopus-install b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1-octopus-install new file mode 120000 index 000000000..078cb9006 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1-octopus-install @@ -0,0 +1 @@ +../stress-split-no-cephadm/1-ceph-install
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 000000000..e31e37ba6 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/2-partial-upgrade b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/2-partial-upgrade new file mode 120000 index 000000000..920b14402 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/2-partial-upgrade @@ -0,0 +1 @@ +../stress-split-no-cephadm/2-partial-upgrade
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/default.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/default.yaml new file mode 100644 index 000000000..82ab4ced3 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3-thrash/default.yaml @@ -0,0 +1,26 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-ignorelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 + aggressive_pg_num_changes: false +- print: "**** done thrashosds default.yaml" diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3.1-objectstore b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3.1-objectstore new file mode 120000 index 000000000..b7cd805a0 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/3.1-objectstore @@ -0,0 +1 @@ +../stress-split-no-cephadm/objectstore
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/% b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/% diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/.qa b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rados-ec-workload.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rados-ec-workload.yaml new file mode 100644 index 000000000..c89551e6b --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rados-ec-workload.yaml @@ -0,0 +1,22 @@ +meta: +- desc: | + randomized correctness test for rados operations on an erasure coded pool +stress-tasks: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rbd-ec-workload.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rbd-ec-workload.yaml new file mode 100644 index 000000000..d0e661dca --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/4-ec-workload/rbd-ec-workload.yaml @@ -0,0 +1,31 @@ +meta: +- desc: | + run rbd tests on EC pool + overrides => force bluestore since it's required for ec-overwrite + use an EC pool for rbd and run xfstests on top of it to verify correctness +tasks: +- exec: + client.0: + - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 + - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile + - sudo ceph osd pool set datapool allow_ec_overwrites true + - rbd pool init datapool +- qemu: + all: + clone: true + type: block + disks: 3 + test: qa/run_xfstests_qemu.sh +- print: "**** done rbd/qemu ec task" +exclude_arch: armv7l +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + client: + rbd default data pool: datapool + osd: # force bluestore since it's required for ec overwrites + osd objectstore: bluestore diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/5-finish-upgrade.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/5-finish-upgrade.yaml new file mode 120000 index 000000000..9723ca018 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/5-finish-upgrade.yaml @@ -0,0 +1 @@ +../stress-split-no-cephadm/5-finish-upgrade.yaml
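rbd-ec-workload.yaml above builds an EC profile, enables allow_ec_overwrites, and forces bluestore because EC overwrites require it. If a job also wanted the chosen profile and pool flag recorded in its logs, one hedged addition in the same exec style would be:

    - exec:
        client.0:
          - sudo ceph osd erasure-code-profile get teuthologyprofile   # prints k, m, crush-failure-domain
          - sudo ceph osd pool get datapool allow_ec_overwrites        # expect: allow_ec_overwrites: true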
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/6-pacific.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/6-pacific.yaml new file mode 120000 index 000000000..1467fc88e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/6-pacific.yaml @@ -0,0 +1 @@ +.qa/releases/pacific.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/7-final-workload.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/7-final-workload.yaml new file mode 100644 index 000000000..31d5ac961 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/7-final-workload.yaml @@ -0,0 +1,36 @@ +# +# k=3 implies a stripe_width of 1376*3 = 4128 which is different from +# the default value of 4096. It is also not a multiple of 1024*1024 and +# creates situations where rounding rules during recovery become +# necessary. +# +meta: +- desc: | + randomized correctness test for rados operations on an erasure coded pool + using the jerasure plugin with k=3 and m=1 +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure31profile + plugin: jerasure + k: 3 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 +- print: "**** done rados ec 7-final-workload.yaml" diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/mon_election b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/mon_election new file mode 120000 index 000000000..3f331e621 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/mon_election @@ -0,0 +1 @@ +.qa/mon_election
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/thrashosds-health.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/thrashosds-health.yaml new file mode 120000 index 000000000..9124eb1aa --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/ubuntu_18.04.yaml b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/ubuntu_18.04.yaml new file mode 120000 index 000000000..cfb85f10e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-erasure-code-no-cephadm/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_18.04.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/% b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/% diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/.qa @@ -0,0 +1 @@ +../.qa
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/+ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/+ diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/openstack.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/openstack.yaml new file mode 100644 index 000000000..5caffc353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/openstack.yaml @@ -0,0 +1,6 @@ +openstack: + - machine: + disk: 100 # GB + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/start.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/start.yaml new file mode 100644 index 000000000..5d61bb7d9 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/0-cluster/start.yaml @@ -0,0 +1,40 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client-only node. + Use xfs beneath the osds. +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + fs: xfs + log-ignorelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(MGR_DOWN\) + - slow request + - \(MON_MSGR2_NOT_ENABLED\) + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon warn on msgr2 not enabled: false + mon: + mon warn on osd down out interval zero: false +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - mon.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - mon.c +- - osd.8 + - osd.9 + - osd.10 + - osd.11 +- - client.0 diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1-ceph-install/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1-ceph-install/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1-ceph-install/octopus.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1-ceph-install/octopus.yaml new file mode 100644 index 000000000..3d7a52a49 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1-ceph-install/octopus.yaml @@ -0,0 +1,35 @@ +meta: +- desc: install ceph/octopus latest +tasks: +- install: + exclude_packages: + - ceph-mgr-cephadm + - cephadm + - libcephfs-dev + branch: octopus +- print: "**** done install octopus" +- ceph: + conf: + global: + bluestore_warn_on_legacy_statfs: false + bluestore warn on no per pool omap: false + mon pg warn min per osd: 0 + mon: + mon_warn_on_insecure_global_id_reclaim: false + mon_warn_on_insecure_global_id_reclaim_allowed: false + log-ignorelist: + - Not found or unloadable + - evicting unresponsive client +- exec: + osd.0: + - ceph osd require-osd-release octopus + - ceph osd set-require-min-compat-client octopus +- print: "**** done ceph" +- rgw: + - client.0 +- print: "**** done => started rgw client.0" +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/.qa @@ -0,0 +1 @@ +../.qa/
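The 1-ceph-install/octopus.yaml above pins both the osdmap release floor and the client compat floor before any daemon is upgraded. A hedged verification step (not in the suite, same exec task) that the pins landed in the osdmap:

    - exec:
        osd.0:
          - ceph osd dump | grep 'require_osd_release octopus'
          - ceph osd dump | grep 'require_min_compat_client octopus'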
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 000000000..e31e37ba6 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/2-partial-upgrade/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/2-partial-upgrade/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 000000000..58ff5ac66 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + install upgrade ceph/-x on 2/3 of cluster + restart : mons, osd.0-7 +tasks: +- install.upgrade: + mon.a: + mon.b: + mon.c: +- print: "**** done install.upgrade of first 3 nodes" +- ceph.restart: + daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7] + mon-health-to-clog: false +- print: "**** done ceph.restart of all mons and 2/3 of osds" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/3-thrash/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/3-thrash/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/
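firsthalf.yaml above deliberately leaves the cluster mixed-version: the restarted daemons (mon.a, mon.b, mgr.x, osd.0-7) run the new build while mon.c and osd.8-11 keep running octopus until 5-finish-upgrade. A hedged way to surface that state in the teuthology log (hypothetical addition) is simply:

    - exec:
        mon.a:
          - ceph versions   # expect two distinct versions while only 2/3 of the osds are upgraded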
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/3-thrash/default.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/3-thrash/default.yaml new file mode 100644 index 000000000..2be9c1f29 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/3-thrash/default.yaml @@ -0,0 +1,26 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-ignorelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 + aggressive_pg_num_changes: false +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/+ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/+ diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml new file mode 100644 index 000000000..5832dfa51 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/radosbench.yaml @@ -0,0 +1,52 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +stress-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done end radosbench.yaml" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd-cls.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd-cls.yaml new file mode 100644 index 000000000..07ab6e10e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +stress-tasks: +- workunit: + branch: octopus + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd-import-export.yaml new file mode 100644 index 000000000..a3968fef3 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +stress-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd_api.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd_api.yaml new file mode 100644 index 000000000..7212d3fc6 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +stress-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/readwrite.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/readwrite.yaml new file mode 100644 index 000000000..41e34d6d7 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + 
write: 45 + delete: 10 +- print: "**** done rados/readwrite 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rgw_ragweed_prepare.yaml new file mode 100644 index 000000000..bcf5ebe5d --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/rgw_ragweed_prepare.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + rgw ragweed prepare before upgrade +stress-tasks: + - full_sequential: + - sequential: + - ragweed: + client.0: + default-branch: ceph-octopus + rgw_server: client.0 + stages: prepare + - print: "**** done rgw ragweed prepare 4-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/snaps-few-objects.yaml new file mode 100644 index 000000000..f56d0de0f --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/4-workload/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml new file mode 100644 index 000000000..e94de51b2 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/5-finish-upgrade.yaml @@ -0,0 +1,15 @@ +tasks: +- install.upgrade: + osd.8: + client.0: + extra_packages: + - python3-rados + - python3-rgw + - python3-rbd + - python3-cephfs +- ceph.restart: + daemons: [mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*] + wait-for-healthy: false + wait-for-osds-up: true +- print: "**** done restarted/upgraded => mon.c, osd.8, osd.9, osd.10, osd.11, rgw.*" + diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/6-pacific.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/6-pacific.yaml new file mode 120000 index 000000000..1467fc88e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/6-pacific.yaml @@ -0,0 +1 @@ +.qa/releases/pacific.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/+ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/+ diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml new file mode 100644 index 000000000..d04fab7df --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/rbd-python.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + librbd python api tests +overrides: + ceph: + conf: + client: + rbd default clone format: 1 +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh" diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/snaps-many-objects.yaml new file mode 100644 index 000000000..805bf97c3 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/8-final-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/mon_election b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/mon_election new file mode 120000 index 000000000..3f331e621 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/mon_election @@ -0,0 +1 @@ +.qa/mon_election
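The mon_election symlink above (and its twins in the other octopus-x suites) fans each job out across monitor election strategies. The linked fragments are not part of this diff, so purely as an assumption about their shape: such a variant is typically just a conf override, along these lines:

    overrides:
      ceph:
        conf:
          global:
            mon election default strategy: 3   # hypothetical values: 1 = classic, 3 = connectivity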
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/.qa b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/.qa new file mode 120000 index 000000000..a602a0353 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/.qa @@ -0,0 +1 @@ +../.qa/
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/bluestore-hybrid.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/bluestore-hybrid.yaml new file mode 120000 index 000000000..39c0a9bc0 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/bluestore-hybrid.yaml @@ -0,0 +1 @@ +.qa/objectstore_debug/bluestore-hybrid.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/filestore-xfs.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/filestore-xfs.yaml new file mode 120000 index 000000000..d83049f3c --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/objectstore/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore_debug/filestore-xfs.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/thrashosds-health.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/thrashosds-health.yaml new file mode 120000 index 000000000..9124eb1aa --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/ubuntu_18.04.yaml b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/ubuntu_18.04.yaml new file mode 120000 index 000000000..cfb85f10e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split-no-cephadm/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_18.04.yaml
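ubuntu_18.04.yaml pins the distro for this non-cephadm variant through the shared distros tree. Distro facets are plain os_type/os_version pairs, as the inline ubuntu_latest.yaml later in this commit shows, so the linked file presumably reads:

# assumed contents of .qa/distros/all/ubuntu_18.04.yaml (not inlined here)
os_type: ubuntu
os_version: "18.04"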
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split/% b/qa/suites/upgrade/octopus-x/stress-split/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/% diff --git a/qa/suites/upgrade/octopus-x/stress-split/.qa b/qa/suites/upgrade/octopus-x/stress-split/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/.qa @@ -0,0 +1 @@ +../.qa
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split/0-distro b/qa/suites/upgrade/octopus-x/stress-split/0-distro new file mode 120000 index 000000000..f84388135 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/0-distro @@ -0,0 +1 @@ +../../../rados/cephadm/smoke/distro
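Two reuse conventions meet here. Every directory carries a .qa symlink that walks up one level, so chained links eventually land on the qa/ root and give fragment references a depth-independent .qa/ prefix; and 0-distro borrows the cephadm smoke suite's distro facets wholesale through a plain relative link. The resolution, sketched:

# how the .qa chain resolves (paths illustrative):
#   qa/suites/upgrade/octopus-x/stress-split/.qa -> ../.qa
#   qa/suites/upgrade/octopus-x/.qa              -> ../.qa
#   ...                                          -> qa/
# so a target like ".qa/mon_election" reaches qa/mon_election
# from any depth in the suite tree.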
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split/0-roles.yaml b/qa/suites/upgrade/octopus-x/stress-split/0-roles.yaml new file mode 100644 index 000000000..ad3ee43d3 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/0-roles.yaml @@ -0,0 +1,31 @@ +roles: +- - mon.a + - mon.c + - mgr.y + - osd.0 + - osd.1 + - osd.2 + - osd.3 + - client.0 + - node-exporter.a + - alertmanager.a +- - mon.b + - mgr.x + - osd.4 + - osd.5 + - osd.6 + - osd.7 + - client.1 + - prometheus.a + - grafana.a + - node-exporter.b +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + create_rbd_pool: true + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/suites/upgrade/octopus-x/stress-split/1-start.yaml b/qa/suites/upgrade/octopus-x/stress-split/1-start.yaml new file mode 100644 index 000000000..74599e22d --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/1-start.yaml @@ -0,0 +1,122 @@ +tasks: +- install: + branch: octopus + +- cephadm: + image: quay.ceph.io/ceph-ci/ceph:octopus + cephadm_branch: octopus + cephadm_git_url: https://github.com/ceph/ceph + conf: + osd: + #set config option for which cls modules are allowed to be loaded / used + osd_class_load_list: "*" + osd_class_default_list: "*" + # deploy additional mons the "old" (octopus) way + add_mons_via_daemon_add: true + avoid_pacific_features: true + +- cephadm.shell: + mon.a: + - ceph fs volume create foo + - ceph config set mon mon_warn_on_insecure_global_id_reclaim false --force + - ceph config set mon mon_warn_on_insecure_global_id_reclaim_allowed false --force + +- ceph.healthy: + +- print: "**** upgrading first half of cluster, with stress ****" +- parallel: + - first-half-tasks + - first-half-sequence +- print: "**** done upgrading first half of cluster ****" + +- ceph.healthy: + +- print: "**** applying stress + thrashing to mixed-version cluster ****" + +- parallel: + - stress-tasks + +- ceph.healthy: + +- print: "**** finishing upgrade ****" +- parallel: + - second-half-tasks + - second-half-sequence + +- ceph.healthy: + + +################# + +first-half-sequence: +- cephadm.shell: + env: [sha1] + mon.a: + - ceph config set mgr mgr/cephadm/daemon_cache_timeout 60 + + - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1 + - ceph orch ps + + - echo wait for minority of mons to upgrade + - while ! ceph mon versions | grep $sha1 ; do sleep 2 ; done + - ceph orch ps + - ceph orch upgrade pause + - sleep 60 + - ceph orch upgrade resume + + - echo wait for majority of mons to upgrade + - "while ! ceph mon versions | grep $sha1 | egrep ': [23]' ; do sleep 2 ; done" + - ceph orch ps + - ceph orch upgrade pause + - sleep 60 + - ceph orch upgrade resume + + - echo wait for all mons to upgrade + - "while ! ceph mon versions | grep $sha1 | grep ': 3' ; do sleep 2 ; done" + - ceph orch ps + - ceph orch upgrade pause + - sleep 60 + - ceph orch upgrade resume + + - echo wait for half of osds to upgrade + - "while ! 
ceph osd versions | grep $sha1 | egrep ': [45678]'; do sleep 2 ; done" + - ceph orch upgrade pause + - ceph orch ps + + - ceph orch ps + - ceph versions + + +################# + +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 + aggressive_pg_num_changes: false + + +################# + +second-half-sequence: + sequential: + - cephadm.shell: + env: [sha1] + mon.a: + - ceph orch upgrade resume + - sleep 60 + + - echo wait for upgrade to complete + - while ceph orch upgrade status | jq '.in_progress' | grep true ; do ceph orch ps ; ceph versions ; sleep 30 ; done + + - echo upgrade complete + - ceph orch ps + - ceph versions + - ceph versions | jq -e '.overall | length == 1' + - ceph versions | jq -e '.overall | keys' | grep $sha1 diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/.qa b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/.qa new file mode 120000 index 000000000..fea2489fd --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/.qa @@ -0,0 +1 @@ +../.qa
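The first-half-sequence above drives ceph orch upgrade in deliberate pause/resume steps, gating each step on how many daemons already report the target build. The gate is a plain poll: ceph mon versions (and ceph osd versions) print a map of version string to daemon count, so grepping for $sha1 with a count pattern blocks until enough daemons have crossed over. One gate, condensed from the sequence above:

# one upgrade gate, condensed; counts reflect the 3-mon layout here
- cephadm.shell:
    env: [sha1]
    mon.a:
    - ceph orch upgrade start --image quay.ceph.io/ceph-ci/ceph:$sha1
    # block until 2 of the 3 mons report the target build
    - "while ! ceph mon versions | grep $sha1 | egrep ': [23]' ; do sleep 2 ; done"
    - ceph orch upgrade pause     # hold the rollout mid-flight
    - sleep 60                    # give the parallel workload time on the mixed cluster
    - ceph orch upgrade resume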
\ No newline at end of file diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/radosbench.yaml b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/radosbench.yaml new file mode 100644 index 000000000..3816ca38c --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/radosbench.yaml @@ -0,0 +1,19 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +first-half-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done end radosbench.yaml" diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd-cls.yaml b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd-cls.yaml new file mode 100644 index 000000000..ffe09dc74 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +first-half-tasks: +- workunit: + branch: octopus + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd-import-export.yaml b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd-import-export.yaml new file mode 100644 index 000000000..992f31bd7 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +first-half-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd_api.yaml b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd_api.yaml new file mode 100644 index 000000000..e4c1d54e8 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +first-half-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/readwrite.yaml b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/readwrite.yaml new file mode 100644 index 000000000..21a9f379a --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +first-half-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 +- print: "**** done rados/readwrite 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/snaps-few-objects.yaml b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/snaps-few-objects.yaml new file mode 100644 index 000000000..6447c2245 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/2-first-half-tasks/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +first-half-tasks: +- 
full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/+ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/+ diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/radosbench.yaml b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/radosbench.yaml new file mode 100644 index 000000000..9058bd804 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/radosbench.yaml @@ -0,0 +1,25 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +stress-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done end radosbench.yaml" diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd-cls.yaml b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd-cls.yaml new file mode 100644 index 000000000..07ab6e10e --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +stress-tasks: +- workunit: + branch: octopus + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd-import-export.yaml b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd-import-export.yaml new file mode 100644 index 000000000..a3968fef3 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +stress-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd_api.yaml b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd_api.yaml new file mode 100644 index 000000000..7212d3fc6 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +stress-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/readwrite.yaml b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/readwrite.yaml new file mode 100644 index 000000000..41e34d6d7 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 +- print: "**** done 
rados/readwrite 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/snaps-few-objects.yaml b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/snaps-few-objects.yaml new file mode 100644 index 000000000..f56d0de0f --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/3-stress-tasks/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/4-second-half-tasks/radosbench.yaml b/qa/suites/upgrade/octopus-x/stress-split/4-second-half-tasks/radosbench.yaml new file mode 100644 index 000000000..7268cb170 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/4-second-half-tasks/radosbench.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +second-half-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done end radosbench.yaml" diff --git a/qa/suites/upgrade/octopus-x/stress-split/4-second-half-tasks/rbd-import-export.yaml b/qa/suites/upgrade/octopus-x/stress-split/4-second-half-tasks/rbd-import-export.yaml new file mode 100644 index 000000000..f223bda46 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/4-second-half-tasks/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +second-half-tasks: +- workunit: + branch: octopus + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/octopus-x/stress-split/mon_election b/qa/suites/upgrade/octopus-x/stress-split/mon_election new file mode 120000 index 000000000..3f331e621 --- /dev/null +++ b/qa/suites/upgrade/octopus-x/stress-split/mon_election @@ -0,0 +1 @@ +.qa/mon_election
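The 2-first-half-tasks, 3-stress-tasks and 4-second-half-tasks directories only populate the named task lists; 1-start.yaml is what sequences them, pairing each workload list with the matching upgrade phase. Its ordering, stripped to a skeleton:

# skeleton of 1-start.yaml's orchestration (fragments fill the lists)
tasks:
- parallel: [first-half-tasks, first-half-sequence]     # upgrade first half under load
- ceph.healthy:
- parallel: [stress-tasks]                              # thrash the mixed-version cluster
- ceph.healthy:
- parallel: [second-half-tasks, second-half-sequence]   # finish the upgrade
- ceph.healthy: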
\ No newline at end of file diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/% b/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/% diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml new file mode 100644 index 000000000..ebaf84199 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/point-to-point-upgrade.yaml @@ -0,0 +1,178 @@ +meta: +- desc: | + Run ceph on two nodes, using one of them as a client, + with a separate client-only node. + Use xfs beneath the osds. + install ceph/pacific v16.2.5 and the v16.2.x point versions + run workload and upgrade-sequence in parallel + (every point release should be tested) + run workload and upgrade-sequence in parallel + install ceph/pacific latest version + run workload and upgrade-sequence in parallel + Overall upgrade path is - pacific-latest.point-1 => pacific-latest.point => pacific-latest +overrides: + ceph: + log-ignorelist: + - reached quota + - scrub + - osd_map_max_advance + - wrongly marked + - FS_DEGRADED + - POOL_APP_NOT_ENABLED + - CACHE_POOL_NO_HIT_SET + - POOL_FULL + - SMALLER_PG + - pool\(s\) full + - OSD_DOWN + - missing hit_sets + - CACHE_POOL_NEAR_FULL + - PG_AVAILABILITY + - PG_DEGRADED + - application not enabled + - cache pools at or near target size + - filesystem is degraded + - OBJECT_MISPLACED + ### ref: https://tracker.ceph.com/issues/40251 + #removed see ^ - failed to encode map + + fs: xfs + + conf: + global: + mon_warn_on_pool_no_app: false + mon_mds_skip_sanity: true + mon: + mon debug unsafe allow tier with nonempty snaps: true + osd: + osd map max advance: 1000 + osd_class_default_list: "*" + osd_class_load_list: "*" + client: + rgw_crypt_require_ssl: false + rgw crypt s3 kms backend: testing + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 + - mgr.x +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 +- - client.1 +openstack: +- volumes: # attached to each instance + count: 3 + size: 30 # GB +tasks: +- print: "**** done pacific about to install v16.2.5 " +- install: + tag: v16.2.5 + # line below can be removed its from jewel test + #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2'] +- print: "**** done v16.2.5 install" +- ceph: + fs: xfs + add_osds_to_crush: true +- print: "**** done ceph xfs" +- sequential: + - workload +- print: "**** done workload v16.2.5" + + +####### upgrade to v16.2.7 +- install.upgrade: + #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] + mon.a: + tag: v16.2.7 + mon.b: + tag: v16.2.7 +- parallel: + - workload_pacific + - upgrade-sequence_pacific +- print: "**** done parallel pacific v16.2.5" + +#### upgrade to latest pacific +- install.upgrade: + mon.a: + mon.b: +- parallel: + - workload_pacific + - upgrade-sequence_pacific +- print: "**** done parallel pacific branch" + +####################### +workload: + sequential: + - workunit: + clients: + client.0: + - suites/blogbench.sh +workload_pacific: + full_sequential: + - workunit: + branch: pacific + #tag: v16.2.1 + clients: + client.1: + - rados/test.sh + - cls + env: + CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces' + - 
print: "**** done rados/test.sh & cls workload_pacific" + - sequential: + - rgw: [client.0] + - print: "**** done rgw workload_pacific" + - s3tests: + client.0: + force-branch: ceph-pacific + rgw_server: client.0 + scan_for_encryption_keys: false + - print: "**** done s3tests workload_pacific" + - rbd_fsx: + clients: [client.0] + size: 134217728 + - print: "**** done rbd_fsx workload_pacific" + +upgrade-sequence_pacific: + sequential: + - print: "**** done branch: pacific install.upgrade" + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mgr.x] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - print: "**** done ceph.restart all pacific branch mds/osd/mon" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/supported-all-distro/ubuntu_latest.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/supported-all-distro/ubuntu_latest.yaml new file mode 100644 index 000000000..f20398230 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-parallel/supported-all-distro/ubuntu_latest.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "20.04" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/% b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/% new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/% diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/+ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/+ diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/openstack.yaml new file mode 100644 index 000000000..5caffc353 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/openstack.yaml @@ -0,0 +1,6 @@ +openstack: + - machine: + disk: 100 # GB + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/start.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/start.yaml new file mode 100644 index 000000000..1271edd8b --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/0-cluster/start.yaml @@ -0,0 +1,33 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client-only node. + Use xfs beneath the osds. 
+overrides: + ceph: + fs: xfs + log-ignorelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(MGR_DOWN\) + ### ref: https://tracker.ceph.com/issues/40251 + #removed see ^ - failed to encode map + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon: + mon warn on osd down out interval zero: false +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - client.0 diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific.yaml new file mode 100644 index 000000000..d8e3b6e03 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1-ceph-install/pacific.yaml @@ -0,0 +1,21 @@ +meta: +- desc: | + install ceph/pacific v16.2.7 + Overall upgrade path is - pacific-latest.point => pacific-latest +tasks: +- install: + tag: v16.2.7 + exclude_packages: ['librados3'] + extra_packages: ['librados2'] +- print: "**** done install pacific v16.2.7" +- ceph: +- exec: + osd.0: + - ceph osd require-osd-release pacific + - ceph osd set-require-min-compat-client pacific +- print: "**** done ceph" +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1.1.short_pg_log.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1.1.short_pg_log.yaml new file mode 100644 index 000000000..20cc101de --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/1.1.short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + global: + osd_min_pg_log_entries: 1 + osd_max_pg_log_entries: 2 diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 000000000..02ba5c1bb --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,13 @@ +meta: +- desc: | + install upgrade ceph/-x on one node only + 1st half + restart : osd.0,1,2,3 +tasks: +- install.upgrade: + osd.0: +- print: "**** done install.upgrade osd.0" +- ceph.restart: + daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2,osd.3] + mon-health-to-clog: false +- print: "**** done ceph.restart 1st half" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/3-thrash/default.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/3-thrash/default.yaml new file mode 100644 index 000000000..c739d8fea --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/3-thrash/default.yaml @@ -0,0 +1,27 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-ignorelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch + ### ref: https://tracker.ceph.com/issues/40251 + - failed to encode map +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/+ 
b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/+ diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/fsx.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/fsx.yaml new file mode 100644 index 000000000..fd4081f23 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/fsx.yaml @@ -0,0 +1,8 @@ +meta: +- desc: | + run basic fsx tests for rbd +stress-tasks: +- rbd_fsx: + clients: [client.0] + size: 134217728 +- print: "**** done rbd_fsx 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/radosbench.yaml new file mode 100644 index 000000000..c545936c0 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/radosbench.yaml @@ -0,0 +1,52 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +stress-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done radosbench 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd-cls.yaml new file mode 100644 index 000000000..caaac875c --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +stress-tasks: +- workunit: + branch: pacific + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd-import-export.yaml new file mode 100644 index 000000000..f999bd0c8 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +stress-tasks: +- workunit: + branch: pacific + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd_api.yaml new file mode 100644 index 000000000..95c820161 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +stress-tasks: +- workunit: + branch: octopus + clients: + 
client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/readwrite.yaml new file mode 100644 index 000000000..456868998 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 +- print: "**** done rados/readwrite 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/snaps-few-objects.yaml new file mode 100644 index 000000000..ae232d867 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/4-workload/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 4-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/5-finish-upgrade.yaml new file mode 100644 index 000000000..803737c72 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/5-finish-upgrade.yaml @@ -0,0 +1,8 @@ +tasks: +- install.upgrade: + osd.4: + client.0: +- ceph.restart: + daemons: [osd.4, osd.5, osd.6, osd.7] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/+ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/+ new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/+ diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml new file mode 100644 index 000000000..4ca4e7485 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/rbd-python.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd python api tests +tasks: +- workunit: + tag: v16.2.7 + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh 7-workload" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/snaps-many-objects.yaml new file mode 100644 index 000000000..805bf97c3 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/6-final-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 
50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-bitmap.yaml new file mode 100644 index 000000000..b18e04bee --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-bitmap.yaml @@ -0,0 +1,43 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + bluestore allocator: bitmap + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true + bdev enable discard: true + bdev async discard: true + ceph-deploy: + fs: xfs + bluestore: yes + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + bdev enable discard: true + bdev async discard: true + diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-comp.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-comp.yaml new file mode 100644 index 000000000..b408032fd --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-comp.yaml @@ -0,0 +1,23 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore compression mode: aggressive + bluestore fsck on mount: true + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-stupid.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-stupid.yaml new file mode 100644 index 000000000..ca811f131 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/bluestore-stupid.yaml @@ -0,0 +1,43 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + bluestore allocator: stupid + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true + bdev enable discard: 
true + bdev async discard: true + ceph-deploy: + fs: xfs + bluestore: yes + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + bdev enable discard: true + bdev async discard: true + diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/filestore-xfs.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/filestore-xfs.yaml new file mode 100644 index 000000000..f7aa0dd79 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/objectstore/filestore-xfs.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + fs: xfs + conf: + osd: + osd objectstore: filestore + osd sloppy crc: true + ceph-deploy: + fs: xfs + filestore: True + conf: + osd: + osd objectstore: filestore + osd sloppy crc: true + diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml new file mode 100644 index 000000000..f20398230 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "20.04" diff --git a/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/thrashosds-health.yaml b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/thrashosds-health.yaml new file mode 100644 index 000000000..9903fa578 --- /dev/null +++ b/qa/suites/upgrade/pacific-p2p/pacific-p2p-stress-split/thrashosds-health.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-ignorelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(REQUEST_SLOW\) + - \(TOO_FEW_PGS\) + - slow request
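A closing note on thrashosds-health.yaml, which both pacific-p2p variants carry inline: log-ignorelist entries are regular expressions matched against cluster log lines, so expected thrash-induced warnings do not fail the run. Escaped forms such as \(OSDMAP_FLAGS\) match a literal parenthesised health code, while open-ended prefixes such as \(OSD_ cover a whole family of codes:

# sketch: a prefix entry covers a family of health codes, a plain
# string covers free-form log lines emitted while OSDs are thrashed
overrides:
  ceph:
    log-ignorelist:
    - \(OSD_            # OSD_DOWN, OSD_FULL, OSD_NEARFULL, ...
    - slow request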