author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit     19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree       42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /qa/workunits/rbd/krbd_huge_osdmap.sh
parent     Initial commit. (diff)
Adding upstream version 16.2.11+ds. (upstream/16.2.11+ds, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/workunits/rbd/krbd_huge_osdmap.sh')
-rwxr-xr-x  qa/workunits/rbd/krbd_huge_osdmap.sh | 51
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/qa/workunits/rbd/krbd_huge_osdmap.sh b/qa/workunits/rbd/krbd_huge_osdmap.sh
new file mode 100755
index 000000000..0a550d674
--- /dev/null
+++ b/qa/workunits/rbd/krbd_huge_osdmap.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/40481.
+#
+# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
+# is ~13M.  However, the in-memory osdmap is larger than the remaining ~3M:
+# the in-memory osd_addr array alone is ~8M for 60000 OSDs, because each
+# entry is a 128-byte sockaddr_storage.
+#
+# Set mon_max_osd = 60000 in ceph.conf.
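+# e.g.:
+#
+#   [global]
+#   mon_max_osd = 60000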
+
+set -ex
+
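+# succeed iff the given command fails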
+function expect_false() {
+    if "$@"; then return 1; else return 0; fi
+}
+
+function run_test() {
+    local dev
+
+    # initially tiny, grow via incrementals
+    dev=$(sudo rbd map img)
+    for max in 8 60 600 6000 60000; do
+        ceph osd setmaxosd $max
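+        # mapping an image in the nonexistent "wait_for" pool is expected
+        # to fail, but it first forces the kernel client to fetch and
+        # decode the latest osdmap; the pwrite then verifies that I/O to
+        # the mapped device still works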
+        expect_false sudo rbd map wait_for/latest_osdmap
+        xfs_io -c 'pwrite -w 0 12M' $dev
+    done
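+    # bump the osdmap epoch once more by re-injecting the same crushmap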
+    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+    expect_false sudo rbd map wait_for/latest_osdmap
+    xfs_io -c 'pwrite -w 0 12M' $dev
+    sudo rbd unmap $dev
+
+    # initially huge, shrink via incrementals
+    dev=$(sudo rbd map img)
+    for max in 60000 6000 600 60 8; do
+        ceph osd setmaxosd $max
+        expect_false sudo rbd map wait_for/latest_osdmap
+        xfs_io -c 'pwrite -w 0 12M' $dev
+    done
+    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+    expect_false sudo rbd map wait_for/latest_osdmap
+    xfs_io -c 'pwrite -w 0 12M' $dev
+    sudo rbd unmap $dev
+}
+
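+# a 12M image: every pwrite in run_test rewrites it in full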
+rbd create --size 12M img
+run_test
+# repeat with primary affinity (adds an extra per-OSD array,
+# osd_primary_affinity, to the osdmap)
+ceph osd primary-affinity osd.0 0.5
+run_test
+
+echo OK