author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239
tree       64f88b554b444a49f656b6c656111a145cbbaa28  /qa/standalone/crush
parent     Initial commit.
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/standalone/crush')
-rwxr-xr-x  qa/standalone/crush/crush-choose-args.sh  243
-rwxr-xr-x  qa/standalone/crush/crush-classes.sh      265
2 files changed, 508 insertions(+), 0 deletions(-)
diff --git a/qa/standalone/crush/crush-choose-args.sh b/qa/standalone/crush/crush-choose-args.sh
new file mode 100755
index 000000000..ee548db12
--- /dev/null
+++ b/qa/standalone/crush/crush-choose-args.sh
@@ -0,0 +1,243 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7131" # git grep '\<7131\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ CEPH_ARGS+="--crush-location=root=default,host=HOST "
+ CEPH_ARGS+="--osd-crush-initial-weight=3 "
+ #
+ # Disable device auto class feature for now.
+ # The device class is non-deterministic and would
+ # break the crushmap comparisons below.
+ #
+ CEPH_ARGS+="--osd-class-update-on-start=false "
+
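+ # Run every TEST_* function defined below unless specific tests are given as arguments.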
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_choose_args_update() {
+ #
+ # adding a weighted OSD updates the weight up to the top
+ #
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ ceph osd set-require-min-compat-client luminous
+ ceph osd getcrushmap > $dir/map || return 1
+ crushtool -d $dir/map -o $dir/map.txt || return 1
+ sed -i -e '/end crush map/d' $dir/map.txt
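+ # Append a choose_args section to the decompiled map: each entry names a
+ # bucket (bucket_id), gives alternative per-position weights (weight_set),
+ # and alternative bucket ids (ids).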
+ cat >> $dir/map.txt <<EOF
+# choose_args
+choose_args 0 {
+ {
+ bucket_id -1
+ weight_set [
+ [ 2.00000 ]
+ [ 2.00000 ]
+ ]
+ ids [ -10 ]
+ }
+ {
+ bucket_id -2
+ weight_set [
+ [ 2.00000 ]
+ [ 2.00000 ]
+ ]
+ ids [ -20 ]
+ }
+}
+
+# end crush map
+EOF
+ crushtool -c $dir/map.txt -o $dir/map-new || return 1
+ ceph osd setcrushmap -i $dir/map-new || return 1
+ ceph osd crush tree
+
+ run_osd $dir 1 || return 1
+ ceph osd crush tree
+ ceph osd getcrushmap > $dir/map-one-more || return 1
+ crushtool -d $dir/map-one-more -o $dir/map-one-more.txt || return 1
+ cat $dir/map-one-more.txt
+ diff -u $dir/map-one-more.txt $CEPH_ROOT/src/test/crush/crush-choose-args-expected-one-more-3.txt || return 1
+
+ destroy_osd $dir 1 || return 1
+ ceph osd crush tree
+ ceph osd getcrushmap > $dir/map-one-less || return 1
+ crushtool -d $dir/map-one-less -o $dir/map-one-less.txt || return 1
+ diff -u $dir/map-one-less.txt $dir/map.txt || return 1
+}
+
+function TEST_no_update_weight_set() {
+ #
+ # adding a zero weight OSD does not update the weight set at all
+ #
+ local dir=$1
+
+ ORIG_CEPH_ARGS="$CEPH_ARGS"
+ CEPH_ARGS+="--osd-crush-update-weight-set=false "
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ ceph osd set-require-min-compat-client luminous
+ ceph osd crush tree
+ ceph osd getcrushmap > $dir/map || return 1
+ crushtool -d $dir/map -o $dir/map.txt || return 1
+ sed -i -e '/end crush map/d' $dir/map.txt
+ cat >> $dir/map.txt <<EOF
+# choose_args
+choose_args 0 {
+ {
+ bucket_id -1
+ weight_set [
+ [ 2.00000 ]
+ [ 1.00000 ]
+ ]
+ ids [ -10 ]
+ }
+ {
+ bucket_id -2
+ weight_set [
+ [ 2.00000 ]
+ [ 1.00000 ]
+ ]
+ ids [ -20 ]
+ }
+}
+
+# end crush map
+EOF
+ crushtool -c $dir/map.txt -o $dir/map-new || return 1
+ ceph osd setcrushmap -i $dir/map-new || return 1
+ ceph osd crush tree
+
+
+ run_osd $dir 1 || return 1
+ ceph osd crush tree
+ ceph osd getcrushmap > $dir/map-one-more || return 1
+ crushtool -d $dir/map-one-more -o $dir/map-one-more.txt || return 1
+ cat $dir/map-one-more.txt
+ diff -u $dir/map-one-more.txt $CEPH_ROOT/src/test/crush/crush-choose-args-expected-one-more-0.txt || return 1
+
+ destroy_osd $dir 1 || return 1
+ ceph osd crush tree
+ ceph osd getcrushmap > $dir/map-one-less || return 1
+ crushtool -d $dir/map-one-less -o $dir/map-one-less.txt || return 1
+ diff -u $dir/map-one-less.txt $dir/map.txt || return 1
+
+ CEPH_ARGS="$ORIG_CEPH_ARGS"
+}
+
+function TEST_reweight() {
+ # reweight and reweight-compat behave appropriately
+ local dir=$1
+
+ ORIG_CEPH_ARGS="$CEPH_ARGS"
+ CEPH_ARGS+="--osd-crush-update-weight-set=false "
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+
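+ # create-compat initializes the backward-compatible weight set; each item's
+ # weight-set value starts out equal to its CRUSH weight.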
+ ceph osd crush weight-set create-compat || return 1
+ ceph osd crush tree
+
+ ceph osd crush weight-set reweight-compat osd.0 2 || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep host | grep '6.00000 5.00000' || return 1
+
+ run_osd $dir 2 || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep host | grep '9.00000 5.00000' || return 1
+
+ ceph osd crush reweight osd.2 4
+ ceph osd crush tree
+ ceph osd crush tree | grep host | grep '10.00000 5.00000' || return 1
+
+ ceph osd crush weight-set reweight-compat osd.2 4
+ ceph osd crush tree
+ ceph osd crush tree | grep host | grep '10.00000 9.00000' || return 1
+}
+
+function TEST_move_bucket() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+
+ ceph osd crush weight-set create-compat || return 1
+ ceph osd crush weight-set reweight-compat osd.0 2 || return 1
+ ceph osd crush weight-set reweight-compat osd.1 2 || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep HOST | grep '6.00000 4.00000' || return 1
+
+ # moving a bucket adjusts the weights
+ ceph osd crush add-bucket RACK rack root=default || return 1
+ ceph osd crush move HOST rack=RACK || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep HOST | grep '6.00000 4.00000' || return 1
+ ceph osd crush tree | grep RACK | grep '6.00000 4.00000' || return 1
+
+ # weight-set reweight adjusts containing buckets
+ ceph osd crush weight-set reweight-compat osd.0 1 || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep HOST | grep '6.00000 3.00000' || return 1
+ ceph osd crush tree | grep RACK | grep '6.00000 3.00000' || return 1
+
+ # moving a leaf resets its weight-set to the canonical weight...
+ ceph config set mon osd_crush_update_weight_set true || return 1
+ ceph osd crush add-bucket FOO host root=default || return 1
+ ceph osd crush move osd.0 host=FOO || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep osd.0 | grep '3.00000 3.00000' || return 1
+ ceph osd crush tree | grep HOST | grep '3.00000 2.00000' || return 1
+ ceph osd crush tree | grep RACK | grep '3.00000 2.00000' || return 1
+
+ # ...or to zero.
+ ceph config set mon osd_crush_update_weight_set false || return 1
+ ceph osd crush move osd.1 host=FOO || return 1
+ ceph osd crush tree
+ ceph osd crush tree | grep osd.0 | grep '3.00000 3.00000' || return 1
+ ceph osd crush tree | grep osd.1 | grep '3.00000 0' || return 1
+ ceph osd crush tree | grep FOO | grep '6.00000 3.00000' || return 1
+}
+
+main crush-choose-args "$@"
+
+# Local Variables:
+# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-choose-args.sh"
+# End:
diff --git a/qa/standalone/crush/crush-classes.sh b/qa/standalone/crush/crush-classes.sh
new file mode 100755
index 000000000..558aabe6d
--- /dev/null
+++ b/qa/standalone/crush/crush-classes.sh
@@ -0,0 +1,265 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ #
+ # Disable auto-class, so we can inject device class manually below
+ #
+ CEPH_ARGS+="--osd-class-update-on-start=false "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function add_something() {
+ local dir=$1
+ local obj=${2:-SOMETHING}
+
+ local payload=ABCDEF
+ echo $payload > $dir/ORIGINAL
+ rados --pool rbd put $obj $dir/ORIGINAL || return 1
+}
+
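+# Print the ids of the OSDs in the object's up set, space separated.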
+function get_osds_up() {
+ local poolname=$1
+ local objectname=$2
+
+ local osds=$(ceph --format xml osd map $poolname $objectname 2>/dev/null | \
+ $XMLSTARLET sel -t -m "//up/osd" -v . -o ' ')
+ # get rid of the trailing space
+ echo $osds
+}
+
+function TEST_reweight_vs_classes() {
+ local dir=$1
+
+ # CrushWrapper::update_item (and ceph osd crush set) must rebuild the shadow
+ # tree too. https://tracker.ceph.com/issues/48065
+
+ run_mon $dir a || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ ceph osd crush set-device-class ssd osd.0 || return 1
+ ceph osd crush class ls-osd ssd | grep 0 || return 1
+ ceph osd crush set-device-class ssd osd.1 || return 1
+ ceph osd crush class ls-osd ssd | grep 1 || return 1
+
+ ceph osd crush reweight osd.0 1
+
+ h=`hostname -s`
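+ # Item weights in the crush dump are 16.16 fixed point: 65536 == 1.0, 131072 == 2.0.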
+ ceph osd crush dump | jq ".buckets[] | select(.name==\"$h\") | .items[0].weight" | grep 65536
+ ceph osd crush dump | jq ".buckets[] | select(.name==\"$h~ssd\") | .items[0].weight" | grep 65536
+
+ ceph osd crush set 0 2 host=$h
+
+ ceph osd crush dump | jq ".buckets[] | select(.name==\"$h\") | .items[0].weight" | grep 131072
+ ceph osd crush dump | jq ".buckets[] | select(.name==\"$h~ssd\") | .items[0].weight" | grep 131072
+}
+
+function TEST_classes() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ create_rbd_pool || return 1
+
+ test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
+ add_something $dir SOMETHING || return 1
+
+ #
+ # osd.0 has class ssd and the rule is modified
+ # to only take ssd devices.
+ #
+ ceph osd getcrushmap > $dir/map || return 1
+ crushtool -d $dir/map -o $dir/map.txt || return 1
+ ${SED} -i \
+ -e '/device 0 osd.0/s/$/ class ssd/' \
+ -e '/step take default/s/$/ class ssd/' \
+ $dir/map.txt || return 1
+ crushtool -c $dir/map.txt -o $dir/map-new || return 1
+ ceph osd setcrushmap -i $dir/map-new || return 1
+
+ #
+ # There can only be one mapping since there is only
+ # one device with the ssd class.
+ #
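+ # Wait, with exponential backoff, for the new rule to take effect.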
+ ok=false
+ for delay in 2 4 8 16 32 64 128 256 ; do
+ if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then
+ ok=true
+ break
+ fi
+ sleep $delay
+ ceph osd dump # for debugging purposes
+ ceph pg dump # for debugging purposes
+ done
+ $ok || return 1
+ #
+ # Writing keeps working because the pool is min_size 1 by
+ # default.
+ #
+ add_something $dir SOMETHING_ELSE || return 1
+
+ #
+ # Sanity check that the ssd class indeed generated a
+ # shadow bucket with a name including ~ssd.
+ #
+ ceph osd crush dump | grep -q '~ssd' || return 1
+}
+
+function TEST_set_device_class() {
+ local dir=$1
+
+ TEST_classes $dir || return 1
+
+ ceph osd crush set-device-class ssd osd.0 || return 1
+ ceph osd crush class ls-osd ssd | grep 0 || return 1
+ ceph osd crush set-device-class ssd osd.1 || return 1
+ ceph osd crush class ls-osd ssd | grep 1 || return 1
+ ceph osd crush set-device-class ssd 0 1 || return 1 # should be idempotent
+
+ ok=false
+ for delay in 2 4 8 16 32 64 128 256 ; do
+ if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then
+ ok=true
+ break
+ fi
+ sleep $delay
+ ceph osd crush dump
+ ceph osd dump # for debugging purposes
+ ceph pg dump # for debugging purposes
+ done
+ $ok || return 1
+}
+
+function TEST_mon_classes() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ create_rbd_pool || return 1
+
+ test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1
+ add_something $dir SOMETHING || return 1
+
+ # test create and remove class
+ ceph osd crush class create CLASS || return 1
+ ceph osd crush class create CLASS || return 1 # idempotent
+ ceph osd crush class ls | grep CLASS || return 1
+ ceph osd crush class rename CLASS TEMP || return 1
+ ceph osd crush class ls | grep TEMP || return 1
+ ceph osd crush class rename TEMP CLASS || return 1
+ ceph osd crush class ls | grep CLASS || return 1
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd crush-device-class=CLASS || return 1
+ expect_failure $dir EBUSY ceph osd crush class rm CLASS || return 1
+ ceph osd erasure-code-profile rm myprofile || return 1
+ ceph osd crush class rm CLASS || return 1
+ ceph osd crush class rm CLASS || return 1 # test idempotence
+
+ # test rm-device-class
+ ceph osd crush set-device-class aaa osd.0 || return 1
+ ceph osd tree | grep -q 'aaa' || return 1
+ ceph osd crush dump | grep -q '~aaa' || return 1
+ ceph osd crush tree --show-shadow | grep -q '~aaa' || return 1
+ ceph osd crush set-device-class bbb osd.1 || return 1
+ ceph osd tree | grep -q 'bbb' || return 1
+ ceph osd crush dump | grep -q '~bbb' || return 1
+ ceph osd crush tree --show-shadow | grep -q '~bbb' || return 1
+ ceph osd crush set-device-class ccc osd.2 || return 1
+ ceph osd tree | grep -q 'ccc' || return 1
+ ceph osd crush dump | grep -q '~ccc' || return 1
+ ceph osd crush tree --show-shadow | grep -q '~ccc' || return 1
+ ceph osd crush rm-device-class 0 || return 1
+ ceph osd tree | grep -q 'aaa' && return 1
+ ceph osd crush class ls | grep -q 'aaa' && return 1 # class 'aaa' should be gone
+ ceph osd crush rm-device-class 1 || return 1
+ ceph osd tree | grep -q 'bbb' && return 1
+ ceph osd crush class ls | grep -q 'bbb' && return 1 # class 'bbb' should be gone
+ ceph osd crush rm-device-class 2 || return 1
+ ceph osd tree | grep -q 'ccc' && return 1
+ ceph osd crush class ls | grep -q 'ccc' && return 1 # class 'ccc' should be gone
+ ceph osd crush set-device-class asdf all || return 1
+ ceph osd tree | grep -q 'asdf' || return 1
+ ceph osd crush dump | grep -q '~asdf' || return 1
+ ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1
+ ceph osd crush rule create-replicated asdf-rule default host asdf || return 1
+ ceph osd crush rm-device-class all || return 1
+ ceph osd tree | grep -q 'asdf' && return 1
+ ceph osd crush class ls | grep -q 'asdf' || return 1 # still referenced by asdf-rule
+
+ ceph osd crush set-device-class abc osd.2 || return 1
+ ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1
+ out=`ceph osd tree |awk '$1 == 2 && $2 == "abc" {print $0}'`
+ if [ "$out" == "" ]; then
+ return 1
+ fi
+
+ # verify 'crush move' too
+ ceph osd crush dump | grep -q 'foo~abc' || return 1
+ ceph osd crush tree --show-shadow | grep -q 'foo~abc' || return 1
+ ceph osd crush dump | grep -q 'foo-rack~abc' || return 1
+ ceph osd crush tree --show-shadow | grep -q 'foo-rack~abc' || return 1
+ ceph osd crush dump | grep -q 'foo-host~abc' || return 1
+ ceph osd crush tree --show-shadow | grep -q 'foo-host~abc' || return 1
+ ceph osd crush rm-device-class osd.2 || return 1
+ # restore class, so we can continue to test create-replicated
+ ceph osd crush set-device-class abc osd.2 || return 1
+
+ ceph osd crush rule create-replicated foo-rule foo host abc || return 1
+
+ # test that set-device-class cannot implicitly change an existing class
+ ceph osd crush set-device-class hdd osd.0 || return 1
+ expect_failure $dir EBUSY ceph osd crush set-device-class nvme osd.0 || return 1
+
+ # test class rename
+ ceph osd crush rm-device-class all || return 1
+ ceph osd crush set-device-class class_1 all || return 1
+ ceph osd crush class ls | grep 'class_1' || return 1
+ ceph osd crush tree --show-shadow | grep 'class_1' || return 1
+ ceph osd crush rule create-replicated class_1_rule default host class_1 || return 1
+ ceph osd crush class rename class_1 class_2
+ ceph osd crush class rename class_1 class_2 # idempotent
+ ceph osd crush class ls | grep 'class_1' && return 1
+ ceph osd crush tree --show-shadow | grep 'class_1' && return 1
+ ceph osd crush class ls | grep 'class_2' || return 1
+ ceph osd crush tree --show-shadow | grep 'class_2' || return 1
+}
+
+main crush-classes "$@"
+
+# Local Variables:
+# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh"
+# End: