author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf  /qa/suites/rbd/cli/pool
parent     Initial commit. (diff)
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/suites/rbd/cli/pool')
l---------  qa/suites/rbd/cli/pool/.qa                        |  1
-rw-r--r--  qa/suites/rbd/cli/pool/ec-data-pool.yaml          | 27
-rw-r--r--  qa/suites/rbd/cli/pool/none.yaml                  |  0
-rw-r--r--  qa/suites/rbd/cli/pool/replicated-data-pool.yaml  | 11
-rw-r--r--  qa/suites/rbd/cli/pool/small-cache-pool.yaml      | 17
5 files changed, 56 insertions(+), 0 deletions(-)
diff --git a/qa/suites/rbd/cli/pool/.qa b/qa/suites/rbd/cli/pool/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rbd/cli/pool/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/qa/suites/rbd/cli/pool/ec-data-pool.yaml
new file mode 100644
index 00000000..376bf08e
--- /dev/null
+++ b/qa/suites/rbd/cli/pool/ec-data-pool.yaml
@@ -0,0 +1,27 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool set datapool allow_ec_overwrites true
+ - rbd pool init datapool
+
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NO_HIT_SET\)
+ conf:
+ client:
+ rbd default data pool: datapool
+ osd: # force bluestore since it's required for ec overwrites
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ enable experimental unrecoverable data corrupting features: "*"
+ osd debug randomize hobject sort order: false
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
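The ec-data-pool.yaml fragment above creates a k=2/m=1 erasure-coded pool, enables overwrites on it (RBD requires them), and makes it the default data pool for clients. A minimal sketch of how the resulting layout can be checked by hand; the image name ec-test is only an illustration and not part of this suite:

    # confirm overwrites are enabled on the EC pool
    sudo ceph osd pool get datapool allow_ec_overwrites
    # create an image: metadata stays in the replicated rbd pool, data objects go to datapool
    rbd create --size 1G --data-pool datapool ec-test
    # rbd info should report "data_pool: datapool"
    rbd info ec-test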
diff --git a/qa/suites/rbd/cli/pool/none.yaml b/qa/suites/rbd/cli/pool/none.yaml
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/qa/suites/rbd/cli/pool/none.yaml
diff --git a/qa/suites/rbd/cli/pool/replicated-data-pool.yaml b/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
new file mode 100644
index 00000000..c5647dba
--- /dev/null
+++ b/qa/suites/rbd/cli/pool/replicated-data-pool.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create datapool 4
+ - rbd pool init datapool
+
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default data pool: datapool
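replicated-data-pool.yaml achieves the same data/metadata split with a plain replicated pool, relying on the rbd default data pool client option instead of a --data-pool flag on every command. A rough hand-run equivalent of that override (repl-test is a hypothetical image name):

    # ceph.conf equivalent of the conf override above
    [client]
    rbd default data pool = datapool

    # with that option set, a plain create already places data objects in datapool
    rbd create --size 1G repl-test
    rbd info repl-test | grep data_pool    # expected: data_pool: datapool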
diff --git a/qa/suites/rbd/cli/pool/small-cache-pool.yaml b/qa/suites/rbd/cli/pool/small-cache-pool.yaml
new file mode 100644
index 00000000..1b505657
--- /dev/null
+++ b/qa/suites/rbd/cli/pool/small-cache-pool.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-whitelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
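small-cache-pool.yaml layers a small writeback cache tier over the rbd pool and tunes its bloom-filter hit sets; the whitelist entries above cover the health warnings such a tiny cache (target_max_objects 250) is expected to trigger. For reference, a sketch of the usual teardown sequence from the Ceph cache-tiering docs, which this suite does not run itself:

    sudo ceph osd tier cache-mode cache proxy      # stop absorbing new writes in the cache tier
    sudo rados -p cache cache-flush-evict-all      # flush dirty objects back to the rbd pool
    sudo ceph osd tier remove-overlay rbd
    sudo ceph osd tier remove rbd cache
    sudo ceph osd pool delete cache cache --yes-i-really-really-mean-it   # needs mon_allow_pool_delete=true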