Diffstat (limited to 'qa/suites/rbd/encryption/pool')
 l---------  qa/suites/rbd/encryption/pool/.qa                        |  1
 -rw-r--r--  qa/suites/rbd/encryption/pool/ec-cache-pool.yaml         | 21
 -rw-r--r--  qa/suites/rbd/encryption/pool/ec-data-pool.yaml          | 24
 -rw-r--r--  qa/suites/rbd/encryption/pool/none.yaml                  |  0
 -rw-r--r--  qa/suites/rbd/encryption/pool/replicated-data-pool.yaml  | 11
 -rw-r--r--  qa/suites/rbd/encryption/pool/small-cache-pool.yaml      | 17
6 files changed, 74 insertions(+), 0 deletions(-)
diff --git a/qa/suites/rbd/encryption/pool/.qa b/qa/suites/rbd/encryption/pool/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rbd/encryption/pool/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml b/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml
new file mode 100644
index 000000000..a0f88b409
--- /dev/null
+++ b/qa/suites/rbd/encryption/pool/ec-cache-pool.yaml
@@ -0,0 +1,21 @@
+overrides:
+ ceph:
+ log-ignorelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool delete rbd rbd --yes-i-really-really-mean-it
+ - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
+ - rbd pool init rbd
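Note: ec-cache-pool.yaml drops the default replicated 'rbd' pool and recreates it as a k=2/m=1 erasure-coded pool fronted by a writeback cache tier, so the encryption workloads run against a cache-tiered EC base pool. As a rough sketch only (the image name and passphrase file below are placeholders, not part of this suite), an encrypted image could then be created on the rebuilt pool with:

    rbd create --size 1G rbd/img0
    rbd encryption format rbd/img0 luks2 /tmp/passphrase   # LUKS2-format the image in place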
diff --git a/qa/suites/rbd/encryption/pool/ec-data-pool.yaml b/qa/suites/rbd/encryption/pool/ec-data-pool.yaml
new file mode 100644
index 000000000..f39a5bb4c
--- /dev/null
+++ b/qa/suites/rbd/encryption/pool/ec-data-pool.yaml
@@ -0,0 +1,24 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile
+ - sudo ceph osd pool set datapool allow_ec_overwrites true
+ - rbd pool init datapool
+
+overrides:
+ thrashosds:
+ bdev_inject_crash: 2
+ bdev_inject_crash_probability: .5
+ ceph:
+ fs: xfs
+ conf:
+ client:
+ rbd default data pool: datapool
+ osd: # force bluestore since it's required for ec overwrites
+ osd objectstore: bluestore
+ bluestore block size: 96636764160
+ enable experimental unrecoverable data corrupting features: "*"
+ osd debug randomize hobject sort order: false
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+# bluestore bluefs env mirror: true
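Note: ec-data-pool.yaml keeps the replicated 'rbd' pool for image metadata but routes data objects to the erasure-coded 'datapool' via the 'rbd default data pool' client override; allow_ec_overwrites (and therefore BlueStore) is required for RBD data on an EC pool. A sanity check after the task runs might look like this (sketch only, image name is a placeholder):

    ceph osd pool get datapool allow_ec_overwrites    # expect: allow_ec_overwrites: true
    rbd create --size 1G rbd/img0                     # picks up the default data pool from the client override
    rbd info rbd/img0                                 # should list 'data_pool: datapool'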
diff --git a/qa/suites/rbd/encryption/pool/none.yaml b/qa/suites/rbd/encryption/pool/none.yaml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rbd/encryption/pool/none.yaml
diff --git a/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml b/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml
new file mode 100644
index 000000000..c5647dba1
--- /dev/null
+++ b/qa/suites/rbd/encryption/pool/replicated-data-pool.yaml
@@ -0,0 +1,11 @@
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create datapool 4
+ - rbd pool init datapool
+
+overrides:
+ ceph:
+ conf:
+ client:
+ rbd default data pool: datapool
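Note: replicated-data-pool.yaml is the replicated counterpart of ec-data-pool.yaml: a plain 'datapool' plus the same 'rbd default data pool' client override. Without the override, the same layout could be requested per image (sketch only, image name is a placeholder):

    rbd create --size 1G --data-pool datapool rbd/img0   # explicit equivalent of the client-side default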
diff --git a/qa/suites/rbd/encryption/pool/small-cache-pool.yaml b/qa/suites/rbd/encryption/pool/small-cache-pool.yaml
new file mode 100644
index 000000000..bad95eadd
--- /dev/null
+++ b/qa/suites/rbd/encryption/pool/small-cache-pool.yaml
@@ -0,0 +1,17 @@
+overrides:
+ ceph:
+ log-ignorelist:
+ - overall HEALTH_
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
+tasks:
+- exec:
+ client.0:
+ - sudo ceph osd pool create cache 4
+ - sudo ceph osd tier add rbd cache
+ - sudo ceph osd tier cache-mode cache writeback
+ - sudo ceph osd tier set-overlay rbd cache
+ - sudo ceph osd pool set cache hit_set_type bloom
+ - sudo ceph osd pool set cache hit_set_count 8
+ - sudo ceph osd pool set cache hit_set_period 60
+ - sudo ceph osd pool set cache target_max_objects 250
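Note: small-cache-pool.yaml layers a small writeback cache tier (capped at 250 objects) over the stock replicated 'rbd' pool so that promotion and eviction happen frequently under the encryption workloads. One way to confirm the tier wiring after the task runs (a sketch, not part of the suite):

    ceph osd pool ls detail    # 'cache' should show cache_mode writeback and tier_of pointing at the 'rbd' pool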