summaryrefslogtreecommitdiffstats
path: root/qa/erasure-code
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-27 18:24:20 +0000
commit483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
treee5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /qa/erasure-code
parentInitial commit. (diff)
downloadceph-upstream.tar.xz
ceph-upstream.zip
Adding upstream version 14.2.21.upstream/14.2.21upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/erasure-code')
-rw-r--r--qa/erasure-code/ec-feature-plugins-v2.yaml98
-rw-r--r--qa/erasure-code/ec-feature-plugins-v3.yaml98
-rw-r--r--qa/erasure-code/ec-rados-default.yaml19
-rw-r--r--qa/erasure-code/ec-rados-parallel.yaml20
-rw-r--r--qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml25
-rw-r--r--qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml26
-rw-r--r--qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml25
-rw-r--r--qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml31
-rw-r--r--qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml25
-rw-r--r--qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml25
-rw-r--r--qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml25
-rw-r--r--qa/erasure-code/ec-rados-sequential.yaml20
12 files changed, 437 insertions, 0 deletions
diff --git a/qa/erasure-code/ec-feature-plugins-v2.yaml b/qa/erasure-code/ec-feature-plugins-v2.yaml
new file mode 100644
index 00000000..f2d374dd
--- /dev/null
+++ b/qa/erasure-code/ec-feature-plugins-v2.yaml
@@ -0,0 +1,98 @@
+#
+# Test the expected behavior of the
+#
+# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2
+#
+# feature.
+#
+roles:
+- - mon.a
+ - mon.b
+ - osd.0
+ - osd.1
+- - osd.2
+ - mon.c
+ - mgr.x
+tasks:
+#
+# Install firefly
+#
+- install:
+ branch: firefly
+- ceph:
+ fs: xfs
+#
+# We don't need mon.c for now: it will be used later to make sure an old
+# mon cannot join the quorum once the feature has been activated
+#
+- ceph.stop:
+ daemons: [mon.c]
+- exec:
+ mon.a:
+ - |-
+ ceph osd erasure-code-profile set WRONG plugin=WRONG
+ ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
+#
+# Partial upgrade, osd.2 is not upgraded
+#
+- install.upgrade:
+ osd.0:
+#
+# a is the leader
+#
+- ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+- exec:
+ mon.a:
+ - |-
+ ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: the monitor cluster"
+- ceph.restart:
+ daemons: [mon.b, osd.1, osd.0]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+#
+# The lrc plugin cannot be used because osd.2 is not upgraded yet
+# and would crash.
+#
+- exec:
+ mon.a:
+ - |-
+ ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: osd.2"
+#
+# Taking osd.2 out, the rest of the cluster is upgraded
+#
+- ceph.stop:
+ daemons: [osd.2]
+- sleep:
+ duration: 60
+#
+# Creating an erasure code profile using the lrc plugin now works
+#
+- exec:
+ mon.a:
+ - "ceph osd erasure-code-profile set profile-lrc plugin=lrc"
+#
+# osd.2 won't be able to join the cluster because it does not support the feature
+#
+- ceph.restart:
+ daemons: [osd.2]
+ wait-for-healthy: false
+- sleep:
+ duration: 60
+- exec:
+ osd.2:
+ - |-
+ grep "protocol feature.*missing 100000000000" /var/log/ceph/ceph-osd.2.log
+#
+# mon.c won't be able to join the cluster because it does not support the feature
+#
+- ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+- sleep:
+ duration: 60
+- exec:
+ mon.c:
+ - |-
+ grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
diff --git a/qa/erasure-code/ec-feature-plugins-v3.yaml b/qa/erasure-code/ec-feature-plugins-v3.yaml
new file mode 100644
index 00000000..332b9440
--- /dev/null
+++ b/qa/erasure-code/ec-feature-plugins-v3.yaml
@@ -0,0 +1,98 @@
+#
+# Test the expected behavior of the
+#
+# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3
+#
+# feature.
+#
+roles:
+- - mon.a
+ - mon.b
+ - osd.0
+ - osd.1
+- - osd.2
+ - mon.c
+ - mgr.x
+tasks:
+#
+# Install hammer
+#
+- install:
+ branch: hammer
+- ceph:
+ fs: xfs
+#
+# We don't need mon.c for now: it will be used later to make sure an old
+# mon cannot join the quorum once the feature has been activated
+#
+- ceph.stop:
+ daemons: [mon.c]
+- exec:
+ mon.a:
+ - |-
+ ceph osd erasure-code-profile set WRONG plugin=WRONG
+ ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG"
+#
+# Partial upgrade, osd.2 is not upgraded
+#
+- install.upgrade:
+ osd.0:
+#
+# a is the leader
+#
+- ceph.restart:
+ daemons: [mon.a]
+ wait-for-healthy: false
+- exec:
+ mon.a:
+ - |-
+ ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster"
+- ceph.restart:
+ daemons: [mon.b, osd.1, osd.0]
+ wait-for-healthy: false
+ wait-for-osds-up: true
+#
+# The shec plugin cannot be used because osd.2 is not upgraded yet
+# and would crash.
+#
+- exec:
+ mon.a:
+ - |-
+ ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2"
+#
+# Taking osd.2 out, the rest of the cluster is upgraded
+#
+- ceph.stop:
+ daemons: [osd.2]
+- sleep:
+ duration: 60
+#
+# Creating an erasure code profile using the shec plugin now works
+#
+- exec:
+ mon.a:
+ - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec"
+#
+# osd.2 won't be able to join the cluster because it does not support the feature
+#
+- ceph.restart:
+ daemons: [osd.2]
+ wait-for-healthy: false
+- sleep:
+ duration: 60
+- exec:
+ osd.2:
+ - |-
+ grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log
+#
+# mon.c won't be able to join the cluster because it does not support the feature
+#
+- ceph.restart:
+ daemons: [mon.c]
+ wait-for-healthy: false
+- sleep:
+ duration: 60
+- exec:
+ mon.c:
+ - |-
+ grep "missing.*feature" /var/log/ceph/ceph-mon.c.log
diff --git a/qa/erasure-code/ec-rados-default.yaml b/qa/erasure-code/ec-rados-default.yaml
new file mode 100644
index 00000000..cc62371e
--- /dev/null
+++ b/qa/erasure-code/ec-rados-default.yaml
@@ -0,0 +1,19 @@
+tasks:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec task"
diff --git a/qa/erasure-code/ec-rados-parallel.yaml b/qa/erasure-code/ec-rados-parallel.yaml
new file mode 100644
index 00000000..0f01d842
--- /dev/null
+++ b/qa/erasure-code/ec-rados-parallel.yaml
@@ -0,0 +1,20 @@
+workload:
+ parallel:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec parallel"
diff --git a/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml b/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml
new file mode 100644
index 00000000..2efb8543
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: clay42profile
+ plugin: clay
+ k: 4
+ m: 2
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
new file mode 100644
index 00000000..64b59705
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml
@@ -0,0 +1,26 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ min_size: 2
+ write_append_excl: false
+ erasure_code_profile:
+ name: isaprofile
+ plugin: isa
+ k: 2
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
new file mode 100644
index 00000000..d61b1c8a
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure21profile
+ plugin: jerasure
+ k: 2
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
new file mode 100644
index 00000000..2ca53a79
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml
@@ -0,0 +1,31 @@
+#
+# k=3 implies a stripe_width of 1376*3 = 4128 which is different from
+# the default value of 4096. It is also not a multiple of 1024*1024 and
+# creates situations where rounding rules during recovery becomes
+# necessary.
+#
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure31profile
+ plugin: jerasure
+ k: 3
+ m: 1
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml
new file mode 100644
index 00000000..dfcc6160
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: jerasure21profile
+ plugin: jerasure
+ k: 4
+ m: 2
+ technique: reed_sol_van
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
new file mode 100644
index 00000000..86ae0568
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: lrcprofile
+ plugin: lrc
+ k: 4
+ m: 2
+ l: 3
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
new file mode 100644
index 00000000..ee74c6e9
--- /dev/null
+++ b/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml
@@ -0,0 +1,25 @@
+tasks:
+- rados:
+ clients: [client.0]
+ ops: 400
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ erasure_code_profile:
+ name: shecprofile
+ plugin: shec
+ k: 4
+ m: 3
+ c: 2
+ crush-failure-domain: osd
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
diff --git a/qa/erasure-code/ec-rados-sequential.yaml b/qa/erasure-code/ec-rados-sequential.yaml
new file mode 100644
index 00000000..90536ee6
--- /dev/null
+++ b/qa/erasure-code/ec-rados-sequential.yaml
@@ -0,0 +1,20 @@
+workload:
+ sequential:
+ - rados:
+ clients: [client.0]
+ ops: 4000
+ objects: 50
+ ec_pool: true
+ write_append_excl: false
+ op_weights:
+ read: 100
+ write: 0
+ append: 100
+ delete: 50
+ snap_create: 50
+ snap_remove: 50
+ rollback: 50
+ copy_from: 50
+ setattr: 25
+ rmattr: 25
+ - print: "**** done rados ec sequential"