Diffstat (limited to 'qa/suites/upgrade')
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-parallel/%                                          0
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml              173
l---------  qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml         1
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml    2
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/%                                      0
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+                            0
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml               6
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml                  33
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml              21
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml                  6
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml      13
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml                 27
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+                           0
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml                    8
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml            52
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml               12
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml     12
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml               18
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml             16
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml     18
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml                  8
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+                     0
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml      10
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml  16
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml     43
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml       23
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml     43
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml  2
-rw-r--r--  qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml                15
29 files changed, 578 insertions, 0 deletions
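
For orientation: in a teuthology suite tree, the empty "%" file marks a directory whose subdirectories are combined as a cartesian product of facets, and the empty "+" file marks a directory whose yaml fragments are all folded into the same job. The selected fragments are deep-merged into one job description. A minimal sketch of that merge, using excerpts of two fragments from this change (the merged output is illustrative, not part of the diff):

    # excerpt of 0-cluster/start.yaml
    overrides:
      ceph:
        fs: xfs

    # excerpt of 1.1.short_pg_log.yaml
    overrides:
      ceph:
        conf:
          global:
            osd_min_pg_log_entries: 1

    # deep-merged job config (illustrative): mappings combine key by key
    overrides:
      ceph:
        fs: xfs
        conf:
          global:
            osd_min_pg_log_entries: 1
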
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/% b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/%
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml
new file mode 100644
index 000000000..443b89fcf
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml
@@ -0,0 +1,173 @@
+meta:
+- desc: |
+    Run ceph on two nodes, using one of them as a client,
+    with a separate client-only node.
+    Use xfs beneath the osds.
+    Install ceph/reef v18.2.0, then upgrade through the v18.2.x
+    point releases (every point release should be tested),
+    running workload and upgrade-sequence in parallel after each
+    step; finally install the latest reef version and run workload
+    and upgrade-sequence in parallel once more.
+    Overall upgrade path: reef-latest.point-1 => reef-latest.point => reef-latest
+overrides:
+ ceph:
+ log-ignorelist:
+ - reached quota
+ - scrub
+ - osd_map_max_advance
+ - wrongly marked
+ - FS_DEGRADED
+ - POOL_APP_NOT_ENABLED
+ - CACHE_POOL_NO_HIT_SET
+ - POOL_FULL
+ - SMALLER_PG
+ - pool\(s\) full
+ - OSD_DOWN
+ - missing hit_sets
+ - CACHE_POOL_NEAR_FULL
+ - PG_AVAILABILITY
+ - PG_DEGRADED
+ - application not enabled
+ - cache pools at or near target size
+ - filesystem is degraded
+ - OBJECT_MISPLACED
+ ### ref: https://tracker.ceph.com/issues/40251
+    # "- failed to encode map" was removed from this list; see the tracker issue above
+
+ fs: xfs
+
+ conf:
+ global:
+ mon_warn_on_pool_no_app: false
+ mon_mds_skip_sanity: true
+ mon:
+ mon debug unsafe allow tier with nonempty snaps: true
+ osd:
+ osd map max advance: 1000
+ osd_class_default_list: "*"
+ osd_class_load_list: "*"
+ client:
+ rgw_crypt_require_ssl: false
+ rgw crypt s3 kms backend: testing
+ rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+roles:
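+# each top-level list below maps to one test node; its entries are the daemons colocated on that node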
+- - mon.a
+ - mds.a
+ - osd.0
+ - osd.1
+ - osd.2
+ - mgr.x
+- - mon.b
+ - mon.c
+ - osd.3
+ - osd.4
+ - osd.5
+ - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+ count: 3
+ size: 30 # GB
+tasks:
+- print: "**** done reef about to install v18.2.0 "
+- install:
+ tag: v18.2.0
+  # the line below can be removed; it is left over from the jewel test
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v18.2.0 install"
+- ceph:
+ fs: xfs
+ add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+ - workload
+- print: "**** done workload v18.2.0"
+
+
+####### upgrade to v18.2.1
+- install.upgrade:
+ #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+ mon.a:
+ tag: v18.2.1
+ mon.b:
+ tag: v18.2.1
+- parallel:
+ - workload_reef
+ - upgrade-sequence_reef
+- print: "**** done parallel reef v18.2.1"
+
+#### upgrade to latest reef
+- install.upgrade:
+ mon.a:
+ mon.b:
+- parallel:
+ - workload_reef
+ - upgrade-sequence_reef
+- print: "**** done parallel reef branch"
+
+#######################
+workload:
+ sequential:
+ - workunit:
+ clients:
+ client.0:
+ - suites/blogbench.sh
+
+workload_reef:
+ full_sequential:
+ - workunit:
+ branch: reef
+ # tag: v18.2.1
+ clients:
+ client.1:
+ - rados/test.sh
+ - cls
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_snapshot'
+ - print: "**** done rados/test.sh & cls workload_reef"
+ - sequential:
+ - rgw: [client.0]
+ - print: "**** done rgw workload_reef"
+ - rbd_fsx:
+ clients: [client.0]
+ size: 134217728
+ - print: "**** done rbd_fsx workload_reef"
+
+upgrade-sequence_reef:
+ sequential:
+ - print: "**** done branch: reef install.upgrade"
+ - ceph.restart: [mds.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [osd.0]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.1]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.2]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.3]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.4]
+ - sleep:
+ duration: 30
+ - ceph.restart: [osd.5]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mgr.x]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.a]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.b]
+ - sleep:
+ duration: 60
+ - ceph.restart: [mon.c]
+ - sleep:
+ duration: 60
+ - print: "**** done ceph.restart all reef branch mds/osd/mon"
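
A note on the install.upgrade stanzas above: a per-role entry with an explicit tag pins that node's packages to a git tag, while an entry with no version falls through to the branch under test for the run. A minimal sketch, assuming the usual teuthology install.upgrade semantics:

    - install.upgrade:
        mon.a:
          tag: v18.2.1   # pin this node to the v18.2.1 point release
        mon.b:           # no version given: upgrade to the branch under test
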
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml
new file mode 120000
index 000000000..bb4a6aaf3
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported-all-distro/centos_8.yaml \ No newline at end of file
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml
new file mode 100644
index 000000000..f20398230
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "20.04"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/% b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/%
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml
new file mode 100644
index 000000000..5caffc353
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+ - machine:
+ disk: 100 # GB
+ - volumes: # attached to each instance
+ count: 4
+ size: 30 # GB
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml
new file mode 100644
index 000000000..1271edd8b
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml
@@ -0,0 +1,33 @@
+meta:
+- desc: |
+ Run ceph on two nodes,
+ with a separate client-only node.
+ Use xfs beneath the osds.
+overrides:
+ ceph:
+ fs: xfs
+ log-ignorelist:
+ - overall HEALTH_
+ - \(MON_DOWN\)
+ - \(MGR_DOWN\)
+ ### ref: https://tracker.ceph.com/issues/40251
+    # "- failed to encode map" was removed from this list; see the tracker issue above
+ conf:
+ global:
+ enable experimental unrecoverable data corrupting features: "*"
+ mon:
+ mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+ - mon.b
+ - mon.c
+ - mgr.x
+ - osd.0
+ - osd.1
+ - osd.2
+ - osd.3
+- - osd.4
+ - osd.5
+ - osd.6
+ - osd.7
+- - client.0
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml
new file mode 100644
index 000000000..0c7db6ae4
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml
@@ -0,0 +1,21 @@
+meta:
+- desc: |
+ install ceph/reef v18.2.0
+    Overall upgrade path: reef-latest.point-1 => reef-latest
+tasks:
+- install:
+ tag: v18.2.0
+ exclude_packages: ['librados3']
+ extra_packages: ['librados2']
+- print: "**** done install reef v18.2.0"
+- ceph:
+- exec:
+ osd.0:
+ - ceph osd require-osd-release reef
+ - ceph osd set-require-min-compat-client reef
+- print: "**** done ceph"
+overrides:
+ ceph:
+ conf:
+ mon:
+ mon warn on osd down out interval zero: false
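
The two exec commands in this file raise the cluster's compatibility floor: require-osd-release prevents pre-reef osds from joining, and set-require-min-compat-client prevents pre-reef clients from connecting. A hedged sketch of how a job could confirm both settings (ceph osd dump prints the require_osd_release and require_min_compat_client fields; the grep is illustrative, not part of the suite):

    - exec:
        osd.0:
          - ceph osd dump | grep require
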
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml
new file mode 100644
index 000000000..20cc101de
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+ ceph:
+ conf:
+ global:
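+        # a pg log capped at 1-2 entries cannot cover any real divergence,
+        # so recovery after thrashing is forced down the backfill path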
+ osd_min_pg_log_entries: 1
+ osd_max_pg_log_entries: 2
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 000000000..02ba5c1bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+    install the upgraded ceph (version under test) on one node only,
+    the 1st half of the cluster, then
+    restart: mon.a,b,c, mgr.x, osd.0,1,2,3
+tasks:
+- install.upgrade:
+ osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+ daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2,osd.3]
+ mon-health-to-clog: false
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml
new file mode 100644
index 000000000..c739d8fea
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml
@@ -0,0 +1,27 @@
+meta:
+- desc: |
+    randomly kill and revive osds,
+    with a small chance to increase the number of pgs
+overrides:
+ ceph:
+ log-ignorelist:
+ - but it is still running
+ - wrongly marked me down
+ - objects unfound and apparently lost
+ - log bound mismatch
+ ### ref: https://tracker.ceph.com/issues/40251
+ - failed to encode map
+tasks:
+- parallel:
+ - stress-tasks
+stress-tasks:
+- thrashosds:
+ timeout: 1200
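+    # the chance_* options are probabilities; the 0 values disable those thrash behaviours for this suite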
+ chance_pgnum_grow: 1
+ chance_pgpnum_fix: 1
+ chance_thrash_cluster_full: 0
+ chance_thrash_pg_upmap: 0
+ chance_thrash_pg_upmap_items: 0
+ disable_objectstore_tool_tests: true
+ chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml
new file mode 100644
index 000000000..fd4081f23
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: |
+ run basic fsx tests for rbd
+stress-tasks:
+- rbd_fsx:
+ clients: [client.0]
+ size: 134217728
+- print: "**** done rbd_fsx 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml
new file mode 100644
index 000000000..c545936c0
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,52 @@
+meta:
+- desc: |
+ run randomized correctness test for rados operations
+ generate write load with rados bench
+stress-tasks:
+- full_sequential:
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+ - radosbench:
+ clients: [client.0]
+ time: 90
+- print: "**** done radosbench 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 000000000..c0445533d
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic cls tests for rbd
+stress-tasks:
+- workunit:
+ branch: reef
+ clients:
+ client.0:
+ - cls/test_cls_rbd.sh
+ env:
+ CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_snapshot'
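+      # gtest filter: run every test except TestClsRbd.mirror_snapshot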
+- print: "**** done cls/test_cls_rbd.sh 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 000000000..a4bea35a4
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+ run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+ branch: reef
+ clients:
+ client.0:
+ - rbd/import_export.sh
+ env:
+ RBD_CREATE_ARGS: --new-format
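+      # passed to rbd create by the workunit; --new-format produces format 2 (layering-capable) images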
+- print: "**** done rbd/import_export.sh 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 000000000..025616655
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ librbd C and C++ api tests
+overrides:
+ ceph:
+ log-ignorelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_APP_NOT_ENABLED\)
+ - is full \(reached quota
+ - \(POOL_FULL\)
+stress-tasks:
+- workunit:
+ branch: reef
+ clients:
+ client.0:
+ - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml
new file mode 100644
index 000000000..456868998
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool,
+ using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 45
+ write: 45
+ delete: 10
+- print: "**** done rados/readwrite 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 000000000..ae232d867
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+- print: "**** done rados/snaps-few-objects 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml
new file mode 100644
index 000000000..803737c72
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install.upgrade:
+ osd.4:
+ client.0:
+- ceph.restart:
+ daemons: [osd.4, osd.5, osd.6, osd.7]
+ wait-for-healthy: false
+ wait-for-osds-up: true
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml
new file mode 100644
index 000000000..78e68dbdb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+ librbd python api tests
+tasks:
+- workunit:
+ branch: reef
+ clients:
+ client.0:
+ - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 7-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml
new file mode 100644
index 000000000..805bf97c3
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+ randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 500
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml
new file mode 100644
index 000000000..b18e04bee
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml
@@ -0,0 +1,43 @@
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ debug bluestore: 20
+ debug bluefs: 20
+ debug rocksdb: 10
+ bluestore fsck on mount: true
+ bluestore allocator: bitmap
+ # lower the full ratios since we can fill up a 100gb osd so quickly
+ mon osd full ratio: .9
+ mon osd backfillfull_ratio: .85
+ mon osd nearfull ratio: .8
+ osd failsafe full ratio: .95
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
+ bdev enable discard: true
+ bdev async discard: true
+ ceph-deploy:
+ fs: xfs
+ bluestore: yes
+ conf:
+ osd:
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ debug bluestore: 20
+ debug bluefs: 20
+ debug rocksdb: 10
+ bluestore fsck on mount: true
+ # lower the full ratios since we can fill up a 100gb osd so quickly
+ mon osd full ratio: .9
+ mon osd backfillfull_ratio: .85
+ mon osd nearfull ratio: .8
+ osd failsafe full ratio: .95
+ bdev enable discard: true
+ bdev async discard: true
+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml
new file mode 100644
index 000000000..b408032fd
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml
@@ -0,0 +1,23 @@
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ debug bluestore: 20
+ debug bluefs: 20
+ debug rocksdb: 10
+ bluestore compression mode: aggressive
+ bluestore fsck on mount: true
+ # lower the full ratios since we can fill up a 100gb osd so quickly
+ mon osd full ratio: .9
+ mon osd backfillfull_ratio: .85
+ mon osd nearfull ratio: .8
+ osd failsafe full ratio: .95
+
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml
new file mode 100644
index 000000000..ca811f131
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml
@@ -0,0 +1,43 @@
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ conf:
+ osd:
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ debug bluestore: 20
+ debug bluefs: 20
+ debug rocksdb: 10
+ bluestore fsck on mount: true
+ bluestore allocator: stupid
+ # lower the full ratios since we can fill up a 100gb osd so quickly
+ mon osd full ratio: .9
+ mon osd backfillfull_ratio: .85
+ mon osd nearfull ratio: .8
+ osd failsafe full ratio: .95
+# this doesn't work with failures because the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
+ bdev enable discard: true
+ bdev async discard: true
+ ceph-deploy:
+ fs: xfs
+ bluestore: yes
+ conf:
+ osd:
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ debug bluestore: 20
+ debug bluefs: 20
+ debug rocksdb: 10
+ bluestore fsck on mount: true
+ # lower the full ratios since we can fill up a 100gb osd so quickly
+ mon osd full ratio: .9
+ mon osd backfillfull_ratio: .85
+ mon osd nearfull ratio: .8
+ osd failsafe full ratio: .95
+ bdev enable discard: true
+ bdev async discard: true
+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml
new file mode 100644
index 000000000..f20398230
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "20.04"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml
new file mode 100644
index 000000000..9903fa578
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml
@@ -0,0 +1,15 @@
+overrides:
+ ceph:
+ log-ignorelist:
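+    # entries are regular expressions; cluster log lines matching any of them will not fail the run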
+ - overall HEALTH_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
+ - \(SLOW_OPS\)
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
+ - slow request