author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
commit   e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree     64f88b554b444a49f656b6c656111a145cbbaa28 /qa/standalone/mon
parent   Initial commit. (diff)
download ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
         ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/standalone/mon')
-rwxr-xr-x qa/standalone/mon/health-mute.sh              | 124
-rwxr-xr-x qa/standalone/mon/misc.sh                     | 284
-rwxr-xr-x qa/standalone/mon/mkfs.sh                     | 193
-rwxr-xr-x qa/standalone/mon/mon-bind.sh                 | 143
-rwxr-xr-x qa/standalone/mon/mon-created-time.sh         |  54
-rwxr-xr-x qa/standalone/mon/mon-handle-forward.sh       |  64
-rwxr-xr-x qa/standalone/mon/mon-last-epoch-clean.sh     | 307
-rwxr-xr-x qa/standalone/mon/mon-osdmap-prune.sh         |  57
-rwxr-xr-x qa/standalone/mon/mon-ping.sh                 |  46
-rwxr-xr-x qa/standalone/mon/mon-scrub.sh                |  49
-rwxr-xr-x qa/standalone/mon/mon-seesaw.sh               |  72
-rwxr-xr-x qa/standalone/mon/osd-crush.sh                | 196
-rwxr-xr-x qa/standalone/mon/osd-df.sh                   |  97
-rwxr-xr-x qa/standalone/mon/osd-erasure-code-profile.sh | 240
-rwxr-xr-x qa/standalone/mon/osd-pool-create.sh          | 307
-rwxr-xr-x qa/standalone/mon/osd-pool-df.sh              |  76
-rwxr-xr-x qa/standalone/mon/test_pool_quota.sh          |  63
17 files changed, 2372 insertions, 0 deletions
diff --git a/qa/standalone/mon/health-mute.sh b/qa/standalone/mon/health-mute.sh
new file mode 100755
index 000000000..d8e07ca06
--- /dev/null
+++ b/qa/standalone/mon/health-mute.sh
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7143" # git grep '\<7143\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none --mon-pg-warn-min-per-osd 0 --mon-max-pg-per-osd 1000 "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
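+ # run only the TEST_* functions named on the command line, or every TEST_* function defined in this file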
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_mute() {
+ local dir=$1
+ setup $dir || return 1
+
+ set -o pipefail
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ ceph osd pool create foo 8
+ ceph osd pool application enable foo rbd --yes-i-really-mean-it
+ wait_for_clean || return 1
+
+ ceph -s
+ ceph health | grep HEALTH_OK || return 1
+ # test warning on setting pool size=1
+ ceph osd pool set foo size 1 --yes-i-really-mean-it
+ ceph -s
+ ceph health | grep HEALTH_WARN || return 1
+ ceph health detail | grep POOL_NO_REDUNDANCY || return 1
+ ceph health mute POOL_NO_REDUNDANCY
+ ceph -s
+ ceph health | grep HEALTH_OK | grep POOL_NO_REDUNDANCY || return 1
+ ceph health unmute POOL_NO_REDUNDANCY
+ ceph -s
+ ceph health | grep HEALTH_WARN || return 1
+ # restore pool size to default
+ ceph osd pool set foo size 3
+ ceph -s
+ ceph health | grep HEALTH_OK || return 1
+ ceph osd set noup
+ ceph -s
+ ceph health detail | grep OSDMAP_FLAGS || return 1
+ ceph osd down 0
+ ceph -s
+ ceph health detail | grep OSD_DOWN || return 1
+ ceph health detail | grep HEALTH_WARN || return 1
+
+ ceph health mute OSD_DOWN
+ ceph health mute OSDMAP_FLAGS
+ ceph -s
+ ceph health | grep HEALTH_OK | grep OSD_DOWN | grep OSDMAP_FLAGS || return 1
+ ceph health unmute OSD_DOWN
+ ceph -s
+ ceph health | grep HEALTH_WARN || return 1
+
+ # ttl: a mute with a time-to-live should expire on its own
+ ceph health mute OSD_DOWN 10s
+ ceph -s
+ ceph health | grep HEALTH_OK || return 1
+ sleep 15
+ ceph -s
+ ceph health | grep HEALTH_WARN || return 1
+
+ # sticky: a sticky mute persists even after the muted condition clears and later reappears
+ ceph health mute OSDMAP_FLAGS --sticky
+ ceph osd unset noup
+ sleep 5
+ ceph -s
+ ceph health | grep OSDMAP_FLAGS || return 1
+ ceph osd set noup
+ ceph -s
+ ceph health | grep HEALTH_OK || return 1
+
+ # ratchet down on OSD_DOWN count
+ ceph osd down 0 1
+ ceph -s
+ ceph health detail | grep OSD_DOWN || return 1
+
+ ceph health mute OSD_DOWN
+ kill_daemons $dir TERM osd.0
+ ceph osd unset noup
+ sleep 10
+ ceph -s
+ ceph health detail | grep OSD_DOWN || return 1
+ ceph health detail | grep '1 osds down' || return 1
+ ceph health | grep HEALTH_OK || return 1
+
+ sleep 10 # give time for mon tick to ratchet the mute
+ ceph osd set noup
+ ceph health mute OSDMAP_FLAGS
+ ceph -s
+ ceph health detail
+ ceph health | grep HEALTH_OK || return 1
+
+ ceph osd down 1
+ ceph -s
+ ceph health detail
+ ceph health detail | grep '2 osds down' || return 1
+
+ sleep 10 # give time for mute to clear
+ ceph -s
+ ceph health detail
+ ceph health | grep HEALTH_WARN || return 1
+ ceph health detail | grep '2 osds down' || return 1
+
+ teardown $dir || return 1
+}
+
+main health-mute "$@"
diff --git a/qa/standalone/mon/misc.sh b/qa/standalone/mon/misc.sh
new file mode 100755
index 000000000..c7fc6d441
--- /dev/null
+++ b/qa/standalone/mon/misc.sh
@@ -0,0 +1,284 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7102" # git grep '\<7102\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ $func $dir || return 1
+ done
+}
+
+TEST_POOL=rbd
+
+function TEST_osd_pool_get_set() {
+ local dir=$1
+
+ setup $dir || return 1
+ run_mon $dir a || return 1
+ create_pool $TEST_POOL 8
+
+ local flag
+ for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do
+ ceph osd pool set $TEST_POOL $flag 0 || return 1
+ ! ceph osd dump | grep 'pool ' | grep $flag || return 1
+ ceph osd pool set $TEST_POOL $flag 1 || return 1
+ ceph osd dump | grep 'pool ' | grep $flag || return 1
+ ceph osd pool set $TEST_POOL $flag false || return 1
+ ! ceph osd dump | grep 'pool ' | grep $flag || return 1
+ ceph osd pool set $TEST_POOL $flag false || return 1
+ # check that setting false twice does not toggle to true (bug)
+ ! ceph osd dump | grep 'pool ' | grep $flag || return 1
+ ceph osd pool set $TEST_POOL $flag true || return 1
+ ceph osd dump | grep 'pool ' | grep $flag || return 1
+ # cleanup
+ ceph osd pool set $TEST_POOL $flag 0 || return 1
+ done
+
+ local size=$(ceph osd pool get $TEST_POOL size|awk '{print $2}')
+ local min_size=$(ceph osd pool get $TEST_POOL min_size|awk '{print $2}')
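+ # the expected default min_size is size - size/2 (integer division), e.g. 2 when size is 3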
+ local expected_min_size=$(expr $size - $size / 2)
+ if [ $min_size -ne $expected_min_size ]; then
+ echo "default min_size is wrong: expected $expected_min_size, got $min_size"
+ return 1
+ fi
+
+ ceph osd pool set $TEST_POOL scrub_min_interval 123456 || return 1
+ ceph osd dump | grep 'pool ' | grep 'scrub_min_interval 123456' || return 1
+ ceph osd pool set $TEST_POOL scrub_min_interval 0 || return 1
+ ceph osd dump | grep 'pool ' | grep 'scrub_min_interval' && return 1
+ ceph osd pool set $TEST_POOL scrub_max_interval 123456 || return 1
+ ceph osd dump | grep 'pool ' | grep 'scrub_max_interval 123456' || return 1
+ ceph osd pool set $TEST_POOL scrub_max_interval 0 || return 1
+ ceph osd dump | grep 'pool ' | grep 'scrub_max_interval' && return 1
+ ceph osd pool set $TEST_POOL deep_scrub_interval 123456 || return 1
+ ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval 123456' || return 1
+ ceph osd pool set $TEST_POOL deep_scrub_interval 0 || return 1
+ ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval' && return 1
+
+ # replicated pool size is restricted to the range 1..10
+ ! ceph osd pool set $TEST_POOL size 11 || return 1
+ # replicated pool min_size must be between 1 and size
+ ! ceph osd pool set $TEST_POOL min_size $(expr $size + 1) || return 1
+ ! ceph osd pool set $TEST_POOL min_size 0 || return 1
+
+ local ecpool=erasepool
+ create_pool $ecpool 12 12 erasure default || return 1
+ # erasure pool size=k+m; default min_size=k+1
+ local size=$(ceph osd pool get $ecpool size|awk '{print $2}')
+ local min_size=$(ceph osd pool get $ecpool min_size|awk '{print $2}')
+ local k=$(expr $min_size - 1) # default min_size=k+1
+ # erasure pool size can't be changed
+ ! ceph osd pool set $ecpool size $(expr $size + 1) || return 1
+ # erasure pool min_size must be between k and size
+ ceph osd pool set $ecpool min_size $(expr $k + 1) || return 1
+ ! ceph osd pool set $ecpool min_size $(expr $k - 1) || return 1
+ ! ceph osd pool set $ecpool min_size $(expr $size + 1) || return 1
+
+ teardown $dir || return 1
+}
+
+function TEST_mon_add_to_single_mon() {
+ local dir=$1
+
+ fsid=$(uuidgen)
+ MONA=127.0.0.1:7117 # git grep '\<7117\>' : there must be only one
+ MONB=127.0.0.1:7118 # git grep '\<7118\>' : there must be only one
+ CEPH_ARGS_orig=$CEPH_ARGS
+ CEPH_ARGS="--fsid=$fsid --auth-supported=none "
+ CEPH_ARGS+="--mon-initial-members=a "
+ CEPH_ARGS+="--mon-host=$MONA "
+
+ setup $dir || return 1
+ run_mon $dir a --public-addr $MONA || return 1
+ # wait for the quorum
+ timeout 120 ceph -s > /dev/null || return 1
+ run_mon $dir b --public-addr $MONB || return 1
+ teardown $dir || return 1
+
+ setup $dir || return 1
+ run_mon $dir a --public-addr $MONA || return 1
+ # without the fix of #5454, mon.a will assert failure at seeing the MMonJoin
+ # from mon.b
+ run_mon $dir b --public-addr $MONB || return 1
+ # make sure mon.b gets its join request in first, then
+ sleep 2
+ # wait for the quorum
+ timeout 120 ceph -s > /dev/null || return 1
+ ceph mon dump
+ ceph mon dump -f json-pretty
+ local num_mons
+ num_mons=$(ceph mon dump --format=json 2>/dev/null | jq ".mons | length") || return 1
+ [ $num_mons == 2 ] || return 1
+ # no reason to take more than 120 secs to get this submitted
+ timeout 120 ceph mon add b $MONB || return 1
+ teardown $dir || return 1
+}
+
+function TEST_no_segfault_for_bad_keyring() {
+ local dir=$1
+ setup $dir || return 1
+ # create a client.admin key and add it to ceph.mon.keyring
+ ceph-authtool --create-keyring $dir/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
+ ceph-authtool --create-keyring $dir/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *'
+ ceph-authtool $dir/ceph.mon.keyring --import-keyring $dir/ceph.client.admin.keyring
+ CEPH_ARGS_TMP="--fsid=$(uuidgen) --mon-host=127.0.0.1:7102 --auth-supported=cephx "
+ CEPH_ARGS_orig=$CEPH_ARGS
+ CEPH_ARGS="$CEPH_ARGS_TMP --keyring=$dir/ceph.mon.keyring "
+ run_mon $dir a
+ # create a bad keyring and make sure no segfault occurs when using the bad keyring
+ echo -e "[client.admin]\nkey = BQAUlgtWoFePIxAAQ9YLzJSVgJX5V1lh5gyctg==" > $dir/bad.keyring
+ CEPH_ARGS="$CEPH_ARGS_TMP --keyring=$dir/bad.keyring"
+ ceph osd dump 2> /dev/null
+ # exit status 139 (128 + SIGSEGV/11) means the command segfaulted and dumped core
+ [ $? -eq 139 ] && return 1
+ CEPH_ARGS=$CEPH_ARGS_orig
+ teardown $dir || return 1
+}
+
+function TEST_mon_features() {
+ local dir=$1
+ setup $dir || return 1
+
+ fsid=$(uuidgen)
+ MONA=127.0.0.1:7127 # git grep '\<7127\>' ; there must be only one
+ MONB=127.0.0.1:7128 # git grep '\<7128\>' ; there must be only one
+ MONC=127.0.0.1:7129 # git grep '\<7129\>' ; there must be only one
+ CEPH_ARGS_orig=$CEPH_ARGS
+ CEPH_ARGS="--fsid=$fsid --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC "
+ CEPH_ARGS+="--mon-debug-no-initial-persistent-features "
+ CEPH_ARGS+="--mon-debug-no-require-reef "
+
+ run_mon $dir a --public-addr $MONA || return 1
+ run_mon $dir b --public-addr $MONB || return 1
+ timeout 120 ceph -s > /dev/null || return 1
+
+ # expect monmap to contain 3 monitors (a, b, and c)
+ jqinput="$(ceph quorum_status --format=json 2>/dev/null)"
+ jq_success "$jqinput" '.monmap.mons | length == 3' || return 1
+ # quorum contains two monitors
+ jq_success "$jqinput" '.quorum | length == 2' || return 1
+ # quorum's monitor features contain kraken, luminous, mimic, nautilus,
+ # octopus, pacific, quincy, and reef
+ jqfilter='.features.quorum_mon[]|select(. == "kraken")'
+ jq_success "$jqinput" "$jqfilter" "kraken" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "luminous")'
+ jq_success "$jqinput" "$jqfilter" "luminous" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "mimic")'
+ jq_success "$jqinput" "$jqfilter" "mimic" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "nautilus")'
+ jq_success "$jqinput" "$jqfilter" "nautilus" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "octopus")'
+ jq_success "$jqinput" "$jqfilter" "octopus" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "pacific")'
+ jq_success "$jqinput" "$jqfilter" "pacific" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "quincy")'
+ jq_success "$jqinput" "$jqfilter" "quincy" || return 1
+ jqfilter='.features.quorum_mon[]|select(. == "reef")'
+ jq_success "$jqinput" "$jqfilter" "reef" || return 1
+
+ # monmap must have no persistent features set, because we
+ # don't currently have a quorum made out of all the monitors
+ # in the monmap.
+ jqfilter='.monmap.features.persistent | length == 0'
+ jq_success "$jqinput" "$jqfilter" || return 1
+
+ # nor do we have any optional features, for that matter.
+ jqfilter='.monmap.features.optional | length == 0'
+ jq_success "$jqinput" "$jqfilter" || return 1
+
+ # validate 'mon feature ls'
+
+ jqinput="$(ceph mon feature ls --format=json 2>/dev/null)"
+ # k l m n o p q r are supported
+ jqfilter='.all.supported[] | select(. == "kraken")'
+ jq_success "$jqinput" "$jqfilter" "kraken" || return 1
+ jqfilter='.all.supported[] | select(. == "luminous")'
+ jq_success "$jqinput" "$jqfilter" "luminous" || return 1
+ jqfilter='.all.supported[] | select(. == "mimic")'
+ jq_success "$jqinput" "$jqfilter" "mimic" || return 1
+ jqfilter='.all.supported[] | select(. == "nautilus")'
+ jq_success "$jqinput" "$jqfilter" "nautilus" || return 1
+ jqfilter='.all.supported[] | select(. == "octopus")'
+ jq_success "$jqinput" "$jqfilter" "octopus" || return 1
+ jqfilter='.all.supported[] | select(. == "pacific")'
+ jq_success "$jqinput" "$jqfilter" "pacific" || return 1
+ jqfilter='.all.supported[] | select(. == "quincy")'
+ jq_success "$jqinput" "$jqfilter" "quincy" || return 1
+ jqfilter='.all.supported[] | select(. == "reef")'
+ jq_success "$jqinput" "$jqfilter" "reef" || return 1
+
+ # start third monitor
+ run_mon $dir c --public-addr $MONC || return 1
+
+ wait_for_quorum 300 3 || return 1
+
+ timeout 300 ceph -s > /dev/null || return 1
+
+ jqinput="$(ceph quorum_status --format=json 2>/dev/null)"
+ # expect quorum to have all three monitors
+ jqfilter='.quorum | length == 3'
+ jq_success "$jqinput" "$jqfilter" || return 1
+
+ # quorum's monitor features should have p now too
+ jqfilter='.features.quorum_mon[]|select(. == "pacific")'
+ jq_success "$jqinput" "$jqfilter" "pacific" || return 1
+
+ # persistent too
+ jqfilter='.monmap.features.persistent[]|select(. == "kraken")'
+ jq_success "$jqinput" "$jqfilter" "kraken" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "luminous")'
+ jq_success "$jqinput" "$jqfilter" "luminous" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "mimic")'
+ jq_success "$jqinput" "$jqfilter" "mimic" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "osdmap-prune")'
+ jq_success "$jqinput" "$jqfilter" "osdmap-prune" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "nautilus")'
+ jq_success "$jqinput" "$jqfilter" "nautilus" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "octopus")'
+ jq_success "$jqinput" "$jqfilter" "octopus" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "pacific")'
+ jq_success "$jqinput" "$jqfilter" "pacific" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "elector-pinging")'
+ jq_success "$jqinput" "$jqfilter" "elector-pinging" || return 1
+ jqfilter='.monmap.features.persistent | length == 10'
+ jq_success "$jqinput" "$jqfilter" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "quincy")'
+ jq_success "$jqinput" "$jqfilter" "quincy" || return 1
+ jqfilter='.monmap.features.persistent[]|select(. == "reef")'
+ jq_success "$jqinput" "$jqfilter" "reef" || return 1
+
+ CEPH_ARGS=$CEPH_ARGS_orig
+ # that's all folks. thank you for tuning in.
+ teardown $dir || return 1
+}
+
+main misc "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/misc.sh"
+# End:
diff --git a/qa/standalone/mon/mkfs.sh b/qa/standalone/mon/mkfs.sh
new file mode 100755
index 000000000..6650bdb49
--- /dev/null
+++ b/qa/standalone/mon/mkfs.sh
@@ -0,0 +1,193 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+set -xe
+PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: '
+
+
+DIR=mkfs
+export CEPH_CONF=/dev/null
+unset CEPH_ARGS
+MON_ID=a
+MON_DIR=$DIR/$MON_ID
+CEPH_MON=127.0.0.1:7110 # git grep '\<7110\>' : there must be only one
+TIMEOUT=360
+
+EXTRAOPTS=""
+
+function setup() {
+ teardown
+ mkdir $DIR
+}
+
+function teardown() {
+ kill_daemons
+ rm -fr $DIR
+}
+
+function mon_mkfs() {
+ local fsid=$(uuidgen)
+
+ ceph-mon \
+ --id $MON_ID \
+ --fsid $fsid \
+ $EXTRAOPTS \
+ --mkfs \
+ --mon-data=$MON_DIR \
+ --mon-initial-members=$MON_ID \
+ --mon-host=$CEPH_MON \
+ "$@"
+}
+
+function mon_run() {
+ ceph-mon \
+ --id $MON_ID \
+ --chdir= \
+ --mon-osd-full-ratio=.99 \
+ --mon-data-avail-crit=1 \
+ $EXTRAOPTS \
+ --mon-data=$MON_DIR \
+ --log-file=$MON_DIR/log \
+ --mon-cluster-log-file=$MON_DIR/log \
+ --run-dir=$MON_DIR \
+ --pid-file=$MON_DIR/pidfile \
+ --public-addr $CEPH_MON \
+ "$@"
+}
+
+function kill_daemons() {
+ for pidfile in $(find $DIR -name pidfile) ; do
+ pid=$(cat $pidfile)
+ for try in 0 1 1 1 2 3 ; do
+ kill $pid || break
+ sleep $try
+ done
+ done
+}
+
+function auth_none() {
+ mon_mkfs --auth-supported=none
+
+ ceph-mon \
+ --id $MON_ID \
+ --mon-osd-full-ratio=.99 \
+ --mon-data-avail-crit=1 \
+ $EXTRAOPTS \
+ --mon-data=$MON_DIR \
+ --extract-monmap $MON_DIR/monmap
+
+ [ -f $MON_DIR/monmap ] || return 1
+
+ [ ! -f $MON_DIR/keyring ] || return 1
+
+ mon_run --auth-supported=none
+
+ timeout $TIMEOUT ceph --mon-host $CEPH_MON mon stat || return 1
+}
+
+function auth_cephx_keyring() {
+ cat > $DIR/keyring <<EOF
+[mon.]
+ key = AQDUS79S0AF9FRAA2cgRLFscVce0gROn/s9WMg==
+ caps mon = "allow *"
+EOF
+
+ mon_mkfs --keyring=$DIR/keyring
+
+ [ -f $MON_DIR/keyring ] || return 1
+
+ mon_run
+
+ timeout $TIMEOUT ceph \
+ --name mon. \
+ --keyring $MON_DIR/keyring \
+ --mon-host $CEPH_MON mon stat || return 1
+}
+
+function auth_cephx_key() {
+ if [ -f /etc/ceph/keyring ] ; then
+ echo "Please move /etc/ceph/keyring away for testing!"
+ return 1
+ fi
+
+ local key=$(ceph-authtool --gen-print-key)
+
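+ # mkfs with a malformed key must fail; clear out the partially created store before retrying with a valid key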
+ if mon_mkfs --key='corrupted key' ; then
+ return 1
+ else
+ rm -fr $MON_DIR/store.db
+ rm -fr $MON_DIR/kv_backend
+ fi
+
+ mon_mkfs --key=$key
+
+ [ -f $MON_DIR/keyring ] || return 1
+ grep $key $MON_DIR/keyring
+
+ mon_run
+
+ timeout $TIMEOUT ceph \
+ --name mon. \
+ --keyring $MON_DIR/keyring \
+ --mon-host $CEPH_MON mon stat || return 1
+}
+
+function makedir() {
+ local toodeep=$MON_DIR/toodeep
+
+ # fail if recursive directory creation is needed
+ ceph-mon \
+ --id $MON_ID \
+ --mon-osd-full-ratio=.99 \
+ --mon-data-avail-crit=1 \
+ $EXTRAOPTS \
+ --mkfs \
+ --mon-data=$toodeep 2>&1 | tee $DIR/makedir.log
+ grep 'toodeep.*No such file' $DIR/makedir.log > /dev/null
+ rm $DIR/makedir.log
+
+ # an empty directory does not mean the mon exists
+ mkdir $MON_DIR
+ mon_mkfs --auth-supported=none 2>&1 | tee $DIR/makedir.log
+ ! grep "$MON_DIR already exists" $DIR/makedir.log || return 1
+}
+
+function idempotent() {
+ mon_mkfs --auth-supported=none
+ mon_mkfs --auth-supported=none 2>&1 | tee $DIR/makedir.log
+ grep "'$MON_DIR' already exists" $DIR/makedir.log > /dev/null || return 1
+}
+
+function run() {
+ local actions
+ actions+="makedir "
+ actions+="idempotent "
+ actions+="auth_cephx_key "
+ actions+="auth_cephx_keyring "
+ actions+="auth_none "
+ for action in $actions ; do
+ setup
+ $action || return 1
+ teardown
+ done
+}
+
+run
+
+# Local Variables:
+# compile-command: "cd ../.. ; make TESTS=test/mon/mkfs.sh check"
+# End:
diff --git a/qa/standalone/mon/mon-bind.sh b/qa/standalone/mon/mon-bind.sh
new file mode 100755
index 000000000..41982b916
--- /dev/null
+++ b/qa/standalone/mon/mon-bind.sh
@@ -0,0 +1,143 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Quantum Corp.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+SOCAT_PIDS=()
+
+function port_forward() {
+ local source_port=$1
+ local target_port=$2
+
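+ # relay TCP connections from source_port to target_port in the background, recording the PID so cleanup() can kill it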
+ socat TCP-LISTEN:${source_port},fork,reuseaddr TCP:localhost:${target_port} &
+ SOCAT_PIDS+=( $! )
+}
+
+function cleanup() {
+ for p in "${SOCAT_PIDS[@]}"; do
+ kill $p
+ done
+ SOCAT_PIDS=()
+}
+
+trap cleanup SIGTERM SIGKILL SIGQUIT SIGINT
+
+function run() {
+ local dir=$1
+ shift
+
+ export MON_IP=127.0.0.1
+ export MONA_PUBLIC=7132 # git grep '\<7132\>' ; there must be only one
+ export MONB_PUBLIC=7133 # git grep '\<7133\>' ; there must be only one
+ export MONC_PUBLIC=7134 # git grep '\<7134\>' ; there must be only one
+ export MONA_BIND=7135 # git grep '\<7135\>' ; there must be only one
+ export MONB_BIND=7136 # git grep '\<7136\>' ; there must be only one
+ export MONC_BIND=7137 # git grep '\<7137\>' ; there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir && cleanup || { cleanup; return 1; }
+ teardown $dir
+ done
+}
+
+function TEST_mon_client_connect_fails() {
+ local dir=$1
+
+ # start the mon with a public-bind-addr that is different
+ # from the public-addr.
+ CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC} "
+ run_mon $dir a --mon-host=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
+
+ # now attempt to ping it; that should fail.
+ timeout 3 ceph ping mon.a || return 0
+ return 1
+}
+
+function TEST_mon_client_connect() {
+ local dir=$1
+
+ # start the mon with a public-bind-addr that is different
+ # from the public-addr.
+ CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC} "
+ run_mon $dir a --mon-host=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
+
+ # now forward the public port to the bind port.
+ port_forward ${MONA_PUBLIC} ${MONA_BIND}
+
+ # attempt to connect. we expect that to work
+ ceph ping mon.a || return 1
+}
+
+function TEST_mon_quorum() {
+ local dir=$1
+
+ # start the mon with a public-bind-addr that is different
+ # from the public-addr.
+ CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC},${MON_IP}:${MONB_PUBLIC},${MON_IP}:${MONC_PUBLIC} "
+ run_mon $dir a --public-addr=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
+ run_mon $dir b --public-addr=${MON_IP}:${MONB_PUBLIC} --public-bind-addr=${MON_IP}:${MONB_BIND} || return 1
+ run_mon $dir c --public-addr=${MON_IP}:${MONC_PUBLIC} --public-bind-addr=${MON_IP}:${MONC_BIND} || return 1
+
+ # now forward the public port to the bind port.
+ port_forward ${MONA_PUBLIC} ${MONA_BIND}
+ port_forward ${MONB_PUBLIC} ${MONB_BIND}
+ port_forward ${MONC_PUBLIC} ${MONC_BIND}
+
+ # expect monmap to contain 3 monitors (a, b, and c)
+ jqinput="$(ceph quorum_status --format=json 2>/dev/null)"
+ jq_success "$jqinput" '.monmap.mons | length == 3' || return 1
+
+ # quorum should form
+ wait_for_quorum 300 3 || return 1
+ # expect quorum to have all three monitors
+ jqfilter='.quorum | length == 3'
+ jq_success "$jqinput" "$jqfilter" || return 1
+}
+
+function TEST_put_get() {
+ local dir=$1
+
+ # start the mon with a public-bind-addr that is different
+ # from the public-addr.
+ CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC},${MON_IP}:${MONB_PUBLIC},${MON_IP}:${MONC_PUBLIC} "
+ run_mon $dir a --public-addr=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1
+ run_mon $dir b --public-addr=${MON_IP}:${MONB_PUBLIC} --public-bind-addr=${MON_IP}:${MONB_BIND} || return 1
+ run_mon $dir c --public-addr=${MON_IP}:${MONC_PUBLIC} --public-bind-addr=${MON_IP}:${MONC_BIND} || return 1
+
+ # now forward the public port to the bind port.
+ port_forward ${MONA_PUBLIC} ${MONA_BIND}
+ port_forward ${MONB_PUBLIC} ${MONB_BIND}
+ port_forward ${MONC_PUBLIC} ${MONC_BIND}
+
+ # quorum should form
+ wait_for_quorum 300 3 || return 1
+
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ create_pool hello 8 || return 1
+
+ echo "hello world" > $dir/hello
+ rados --pool hello put foo $dir/hello || return 1
+ rados --pool hello get foo $dir/hello2 || return 1
+ diff $dir/hello $dir/hello2 || return 1
+}
+
+main mon-bind "$@"
diff --git a/qa/standalone/mon/mon-created-time.sh b/qa/standalone/mon/mon-created-time.sh
new file mode 100755
index 000000000..4b8446059
--- /dev/null
+++ b/qa/standalone/mon/mon-created-time.sh
@@ -0,0 +1,54 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015 SUSE LINUX GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7125" # git grep '\<7125\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_mon_created_time() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ ceph mon dump || return 1
+
+ if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = ""x ; then
+ return 1
+ fi
+
+ if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = "0.000000"x ; then
+ return 1
+ fi
+}
+
+main mon-created-time "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/mon-created-time.sh"
+# End:
diff --git a/qa/standalone/mon/mon-handle-forward.sh b/qa/standalone/mon/mon-handle-forward.sh
new file mode 100755
index 000000000..01c8f130f
--- /dev/null
+++ b/qa/standalone/mon/mon-handle-forward.sh
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014,2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+
+ setup $dir || return 1
+
+ MONA=127.0.0.1:7300
+ MONB=127.0.0.1:7301
+ (
+ FSID=$(uuidgen)
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$FSID --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$MONA,$MONB "
+ run_mon $dir a --public-addr $MONA || return 1
+ run_mon $dir b --public-addr $MONB || return 1
+ )
+
+ timeout 360 ceph --mon-host-override $MONA mon stat || return 1
+ # check that MONB is indeed a peon
+ ceph --admin-daemon $(get_asok_path mon.b) mon_status |
+ grep '"peon"' || return 1
+ # when the leader ( MONA ) is used, there is no message forwarding
+ ceph --mon-host-override $MONA osd pool create POOL1 12
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
+ grep 'mon_command(.*"POOL1"' $dir/mon.a.log || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1
+ grep 'mon_command(.*"POOL1"' $dir/mon.b.log && return 1
+ # when the peon ( MONB ) is used, the message is forwarded to the leader
+ ceph --mon-host-override $MONB osd pool create POOL2 12
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1
+ grep 'forward_request.*mon_command(.*"POOL2"' $dir/mon.b.log || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
+ grep ' forward(mon_command(.*"POOL2"' $dir/mon.a.log || return 1
+ # forwarded messages must retain features from the original connection
+ features=$(sed -n -e 's|.*127.0.0.1:0.*accept features \([0-9][0-9]*\)|\1|p' < \
+ $dir/mon.b.log)
+ grep ' forward(mon_command(.*"POOL2".*con_features '$features $dir/mon.a.log || return 1
+
+ teardown $dir || return 1
+}
+
+main mon-handle-forward "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 TESTS=test/mon/mon-handle-forward.sh check"
+# End:
diff --git a/qa/standalone/mon/mon-last-epoch-clean.sh b/qa/standalone/mon/mon-last-epoch-clean.sh
new file mode 100755
index 000000000..82243103e
--- /dev/null
+++ b/qa/standalone/mon/mon-last-epoch-clean.sh
@@ -0,0 +1,307 @@
+#!/usr/bin/env bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7302" # git grep '\<7302\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
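+# with no pool_id, both pools' floors must equal the cluster-wide min_last_epoch_clean; with a pool_id, only that pool's floor is checked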
+function check_lec_equals_pools() {
+
+ local pool_id=$1
+
+ report=$(ceph report)
+ lec=$(echo $report | \
+ jq '.osdmap_clean_epochs.min_last_epoch_clean')
+
+ if [[ -z "$pool_id" ]]; then
+ pools=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
+ " select(.floor == $lec) | .poolid"))
+
+ [[ ${#pools[*]} -eq 2 ]] || ( echo $report ; return 1 )
+ else
+ floor=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
+ " select(.poolid == $pool_id) | .floor"))
+
+ [[ $lec -eq $floor ]] || ( echo $report ; return 1 )
+ fi
+ return 0
+}
+
+function check_lec_lower_than_pool() {
+
+ local pool_id=$1
+ [[ -z "$pool_id" ]] && ( echo "expected pool_id as parameter" ; exit 1 )
+
+ report=$(ceph report)
+ lec=$(echo $report | \
+ jq '.osdmap_clean_epochs.min_last_epoch_clean')
+
+ floor=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
+ " select(.poolid == $pool_id) | .floor"))
+
+ [[ $lec -lt $floor ]] || ( echo $report ; return 1 )
+ return 0
+}
+
+function check_floor_pool_greater_than_pool() {
+
+ local pool_a=$1
+ local pool_b=$2
+ [[ -z "$pool_a" ]] && ( echo "expected id as first parameter" ; exit 1 )
+ [[ -z "$pool_b" ]] && ( echo "expected id as second parameter" ; exit 1 )
+
+ report=$(ceph report)
+
+ floor_a=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
+ " select(.poolid == $pool_a) | .floor"))
+
+ floor_b=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \
+ " select(.poolid == $pool_b) | .floor"))
+
+ [[ $floor_a -gt $floor_b ]] || ( echo $report ; return 1 )
+ return 0
+}
+
+function check_lec_honours_osd() {
+
+ local osd=$1
+
+ report=$(ceph report)
+ lec=$(echo $report | \
+ jq '.osdmap_clean_epochs.min_last_epoch_clean')
+
+ if [[ -z "$osd" ]]; then
+ osds=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.osd_epochs[] |" \
+ " select(.epoch >= $lec) | .id"))
+
+ [[ ${#osds[*]} -eq 3 ]] || ( echo $report ; return 1 )
+ else
+ epoch=($(echo $report | \
+ jq \
+ ".osdmap_clean_epochs.osd_epochs[] |" \
+ " select(.id == $id) | .epoch"))
+ [[ ${#epoch[*]} -eq 1 ]] || ( echo $report ; return 1 )
+ [[ ${epoch[0]} -ge $lec ]] || ( echo $report ; return 1 )
+ fi
+
+ return 0
+}
+
+function validate_fc() {
+ report=$(ceph report)
+ lec=$(echo $report | \
+ jq '.osdmap_clean_epochs.min_last_epoch_clean')
+ osdm_fc=$(echo $report | \
+ jq '.osdmap_first_committed')
+
+ [[ $lec -eq $osdm_fc ]] || ( echo $report ; return 1 )
+ return 0
+}
+
+function get_fc_lc_diff() {
+ report=$(ceph report)
+ osdm_fc=$(echo $report | \
+ jq '.osdmap_first_committed')
+ osdm_lc=$(echo $report | \
+ jq '.osdmap_last_committed')
+
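+ # the gap between osdmap_last_committed and osdmap_first_committed is the number of osdmap epochs not yet trimmed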
+ echo $((osdm_lc - osdm_fc))
+}
+
+function get_pool_id() {
+
+ local pn=$1
+ [[ -z "$pn" ]] && ( echo "expected pool name as argument" ; exit 1 )
+
+ report=$(ceph report)
+ pool_id=$(echo $report | \
+ jq ".osdmap.pools[] | select(.pool_name == \"$pn\") | .pool")
+
+ [[ $pool_id -ge 0 ]] || \
+ ( echo "unexpected pool id for pool \'$pn\': $pool_id" ; return -1 )
+
+ echo $pool_id
+ return 0
+}
+
+function wait_for_total_num_maps() {
+ # cribbed from wait_for_health, because it's easier than deduplicating the code
+ local -a delays=($(get_timeout_delays $TIMEOUT .1))
+ local -i loop=0
+ local -i v_diff=$1
+
+ while [[ $(get_fc_lc_diff) -gt $v_diff ]]; do
+ if (( $loop >= ${#delays[*]} )) ; then
+ echo "maps were not trimmed"
+ return 1
+ fi
+ sleep ${delays[$loop]}
+ loop+=1
+ done
+}
+
+function TEST_mon_last_clean_epoch() {
+
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x --mon-warn-on-pool-no-app=false || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ osd_pid=$(cat $dir/osd.2.pid)
+
+ sleep 5
+
+ ceph tell 'osd.*' injectargs '--osd-beacon-report-interval 10' || exit 1
+ ceph tell 'mon.*' injectargs \
+ '--mon-min-osdmap-epochs 2 --paxos-service-trim-min 1' || exit 1
+
+ create_pool foo 32
+ create_pool bar 32
+
+ foo_id=$(get_pool_id "foo")
+ bar_id=$(get_pool_id "bar")
+
+ [[ $foo_id -lt 0 ]] && ( echo "couldn't find pool 'foo' id" ; exit 1 )
+ [[ $bar_id -lt 0 ]] && ( echo "couldn't find pool 'bar' id" ; exit 1 )
+
+ # no real clue why we are getting these warnings, but let's make them go
+ # away so we can be happy.
+
+ ceph osd set-full-ratio 0.97
+ ceph osd set-backfillfull-ratio 0.97
+
+ wait_for_health_ok || exit 1
+
+ pre_map_diff=$(get_fc_lc_diff)
+ wait_for_total_num_maps 2
+ post_map_diff=$(get_fc_lc_diff)
+
+ [[ $post_map_diff -le $pre_map_diff ]] || exit 1
+
+ pre_map_diff=$post_map_diff
+
+ ceph osd pool set foo size 3
+ ceph osd pool set bar size 3
+
+ wait_for_health_ok || exit 1
+
+ check_lec_equals_pools || exit 1
+ check_lec_honours_osd || exit 1
+ validate_fc || exit 1
+
+ # down osd.2; expected result (because all pools' size equals 3):
+ # - number of committed maps increase over 2
+ # - lec equals fc
+ # - lec equals osd.2's epoch
+ # - all pools have floor equal to lec
+
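+ # keep sending SIGTERM until kill fails, i.e. until the osd.2 process is really gone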
+ while kill $osd_pid ; do sleep 1 ; done
+ ceph osd out 2
+ sleep 5 # seriously, just to make sure things settle; we may not need this.
+
+ # generate some maps
+ for ((i=0; i <= 10; ++i)); do
+ ceph osd set noup
+ sleep 1
+ ceph osd unset noup
+ sleep 1
+ done
+
+ post_map_diff=$(get_fc_lc_diff)
+ [[ $post_map_diff -gt 2 ]] || exit 1
+
+ validate_fc || exit 1
+ check_lec_equals_pools || exit 1
+ check_lec_honours_osd 2 || exit 1
+
+ # adjust pool 'bar' size to 2; expect:
+ # - number of committed maps still over 2
+ # - lec equals fc
+ # - lec equals pool 'foo' floor
+ # - pool 'bar' floor greater than pool 'foo'
+
+ ceph osd pool set bar size 2
+
+ diff_ver=$(get_fc_lc_diff)
+ [[ $diff_ver -gt 2 ]] || exit 1
+
+ validate_fc || exit 1
+
+ check_lec_equals_pools $foo_id || exit 1
+ check_lec_lower_than_pool $bar_id || exit 1
+
+ check_floor_pool_greater_than_pool $bar_id $foo_id || exit 1
+
+ # set pool 'foo' size to 2; expect:
+ # - health_ok
+ # - lec equals pools
+ # - number of committed maps decreases
+ # - lec equals fc
+
+ pre_map_diff=$(get_fc_lc_diff)
+
+ ceph osd pool set foo size 2 || exit 1
+ wait_for_clean || exit 1
+
+ check_lec_equals_pools || exit 1
+ validate_fc || exit 1
+
+ if ! wait_for_total_num_maps 2 ; then
+ post_map_diff=$(get_fc_lc_diff)
+ # number of maps is decreasing though, right?
+ [[ $post_map_diff -lt $pre_map_diff ]] || exit 1
+ fi
+
+ # bring back osd.2; expect:
+ # - health_ok
+ # - lec equals fc
+ # - number of committed maps equals 2
+ # - all pools have floor equal to lec
+
+ pre_map_diff=$(get_fc_lc_diff)
+
+ activate_osd $dir 2 || exit 1
+ wait_for_health_ok || exit 1
+ validate_fc || exit 1
+ check_lec_equals_pools || exit 1
+
+ if ! wait_for_total_num_maps 2 ; then
+ post_map_diff=$(get_fc_lc_diff)
+ # number of maps is decreasing though, right?
+ [[ $post_map_diff -lt $pre_map_diff ]] || exit 1
+ fi
+
+ return 0
+}
+
+main mon-last-clean-epoch "$@"
diff --git a/qa/standalone/mon/mon-osdmap-prune.sh b/qa/standalone/mon/mon-osdmap-prune.sh
new file mode 100755
index 000000000..f8f7876bb
--- /dev/null
+++ b/qa/standalone/mon/mon-osdmap-prune.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+base_test=$CEPH_ROOT/qa/workunits/mon/test_mon_osdmap_prune.sh
+
+function run() {
+
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7115"
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none --mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_osdmap_prune() {
+
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ sleep 5
+
+ # we are getting OSD_OUT_OF_ORDER_FULL health errors, and it's not clear
+ # why. so, to make the health checks happy, mask those errors.
+ ceph osd set-full-ratio 0.97
+ ceph osd set-backfillfull-ratio 0.97
+
+ ceph config set osd osd_beacon_report_interval 10 || return 1
+ ceph config set mon mon_debug_extra_checks true || return 1
+
+ ceph config set mon mon_min_osdmap_epochs 100 || return 1
+ ceph config set mon mon_osdmap_full_prune_enabled true || return 1
+ ceph config set mon mon_osdmap_full_prune_min 200 || return 1
+ ceph config set mon mon_osdmap_full_prune_interval 10 || return 1
+ ceph config set mon mon_osdmap_full_prune_txsize 100 || return 1
+
+
+ bash -x $base_test || return 1
+
+ return 0
+}
+
+main mon-osdmap-prune "$@"
+
diff --git a/qa/standalone/mon/mon-ping.sh b/qa/standalone/mon/mon-ping.sh
new file mode 100755
index 000000000..1f5096be1
--- /dev/null
+++ b/qa/standalone/mon/mon-ping.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015 SUSE LINUX GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7119" # git grep '\<7119\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_mon_ping() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ ceph ping mon.a || return 1
+}
+
+main mon-ping "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/mon-ping.sh"
+# End:
diff --git a/qa/standalone/mon/mon-scrub.sh b/qa/standalone/mon/mon-scrub.sh
new file mode 100755
index 000000000..158bd434c
--- /dev/null
+++ b/qa/standalone/mon/mon-scrub.sh
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7120" # git grep '\<7120\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_mon_scrub() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ ceph mon scrub || return 1
+}
+
+main mon-scrub "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/mon-scrub.sh"
+# End:
diff --git a/qa/standalone/mon/mon-seesaw.sh b/qa/standalone/mon/mon-seesaw.sh
new file mode 100755
index 000000000..1c97847b9
--- /dev/null
+++ b/qa/standalone/mon/mon-seesaw.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON_A="127.0.0.1:7139" # git grep '\<7139\>' : there must be only one
+ export CEPH_MON_B="127.0.0.1:7141" # git grep '\<7141\>' : there must be only one
+ export CEPH_MON_C="127.0.0.1:7142" # git grep '\<7142\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+
+ export BASE_CEPH_ARGS=$CEPH_ARGS
+ CEPH_ARGS+="--mon-host=$CEPH_MON_A "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_mon_seesaw() {
+ local dir=$1
+
+ setup $dir || return
+
+ # start with 1 mon
+ run_mon $dir aa --public-addr $CEPH_MON_A || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ wait_for_quorum 300 1 || return 1
+
+ # add in a second
+ run_mon $dir bb --public-addr $CEPH_MON_B || return 1
+ CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B"
+ wait_for_quorum 300 2 || return 1
+
+ # remove the first one
+ ceph mon rm aa || return 1
+ CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_B"
+ sleep 5
+ wait_for_quorum 300 1 || return 1
+
+ # do some stuff that requires the osds be able to communicate with the
+ # mons. (see http://tracker.ceph.com/issues/17558)
+ ceph osd pool create foo 8
+ rados -p foo bench 1 write
+ wait_for_clean || return 1
+
+ # nuke the monstore so that it will rejoin (otherwise we get
+ # "not in monmap and have been in a quorum before; must have been removed")
+ rm -rf $dir/aa
+
+ # add aa back in
+ # (use a different addr to avoid bind issues)
+ run_mon $dir aa --public-addr $CEPH_MON_C || return 1
+ CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_C,$CEPH_MON_B"
+ wait_for_quorum 300 2 || return 1
+}
+
+main mon-seesaw "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/mon-seesaw.sh"
+# End:
diff --git a/qa/standalone/mon/osd-crush.sh b/qa/standalone/mon/osd-crush.sh
new file mode 100755
index 000000000..aa7cac694
--- /dev/null
+++ b/qa/standalone/mon/osd-crush.sh
@@ -0,0 +1,196 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_crush_rule_create_simple() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ ceph --format xml osd crush rule dump replicated_rule | \
+ egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
+ grep '<op>choose_firstn</op><num>0</num><type>osd</type>' || return 1
+ local rule=rule0
+ local root=host1
+ ceph osd crush add-bucket $root host
+ local failure_domain=osd
+ ceph osd crush rule create-simple $rule $root $failure_domain || return 1
+ ceph osd crush rule create-simple $rule $root $failure_domain 2>&1 | \
+ grep "$rule already exists" || return 1
+ ceph --format xml osd crush rule dump $rule | \
+ egrep '<op>take</op><item>[^<]+</item><item_name>'$root'</item_name>' | \
+ grep '<op>choose_firstn</op><num>0</num><type>'$failure_domain'</type>' || return 1
+ ceph osd crush rule rm $rule || return 1
+}
+
+function TEST_crush_rule_dump() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ local rule=rule1
+ ceph osd crush rule create-erasure $rule || return 1
+ test $(ceph --format json osd crush rule dump $rule | \
+ jq ".rule_name == \"$rule\"") == true || return 1
+ test $(ceph --format json osd crush rule dump | \
+ jq "map(select(.rule_name == \"$rule\")) | length == 1") == true || return 1
+ ! ceph osd crush rule dump non_existent_rule || return 1
+ ceph osd crush rule rm $rule || return 1
+}
+
+function TEST_crush_rule_rm() {
+ local dir=$1
+ local rule=erasure2
+
+ run_mon $dir a || return 1
+
+ ceph osd crush rule create-erasure $rule default || return 1
+ ceph osd crush rule ls | grep $rule || return 1
+ ceph osd crush rule rm $rule || return 1
+ ! ceph osd crush rule ls | grep $rule || return 1
+}
+
+function TEST_crush_rule_create_erasure() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ # should have at least one OSD
+ run_osd $dir 0 || return 1
+
+ local rule=rule3
+ #
+ # create a new rule with the default profile, implicitly
+ #
+ ceph osd crush rule create-erasure $rule || return 1
+ ceph osd crush rule create-erasure $rule 2>&1 | \
+ grep "$rule already exists" || return 1
+ ceph --format xml osd crush rule dump $rule | \
+ egrep '<op>take</op><item>[^<]+</item><item_name>default</item_name>' | \
+ grep '<op>chooseleaf_indep</op><num>0</num><type>host</type>' || return 1
+ ceph osd crush rule rm $rule || return 1
+ ! ceph osd crush rule ls | grep $rule || return 1
+ #
+ # create a new rule with the default profile, explicitly
+ #
+ ceph osd crush rule create-erasure $rule default || return 1
+ ceph osd crush rule ls | grep $rule || return 1
+ ceph osd crush rule rm $rule || return 1
+ ! ceph osd crush rule ls | grep $rule || return 1
+ #
+ # create a new rule and the default profile, implicitly
+ #
+ ceph osd erasure-code-profile rm default || return 1
+ ! ceph osd erasure-code-profile ls | grep default || return 1
+ ceph osd crush rule create-erasure $rule || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1
+ grep 'profile set default' $dir/mon.a.log || return 1
+ ceph osd erasure-code-profile ls | grep default || return 1
+ ceph osd crush rule rm $rule || return 1
+ ! ceph osd crush rule ls | grep $rule || return 1
+}
+
+function TEST_add_rule_failed() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ local root=host1
+
+ ceph osd crush add-bucket $root host
+ ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1
+ ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1
+ ceph osd getcrushmap > $dir/crushmap || return 1
+ crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1
+ for i in $(seq 3 255)
+ do
+ cat <<EOF
+rule test_rule$i {
+ id $i
+ type replicated
+ step take $root
+ step choose firstn 0 type osd
+ step emit
+}
+EOF
+ done >> $dir/crushmap.txt
+ crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1
+ ceph osd setcrushmap -i $dir/crushmap || return 1
+ ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1
+
+}
+
+function TEST_crush_rename_bucket() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ ceph osd crush add-bucket host1 host
+ ceph osd tree
+ ! ceph osd tree | grep host2 || return 1
+ ceph osd crush rename-bucket host1 host2 || return 1
+ ceph osd tree
+ ceph osd tree | grep host2 || return 1
+ ceph osd crush rename-bucket host1 host2 || return 1 # idempotency
+ ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1
+}
+
+function TEST_crush_ls_node() {
+ local dir=$1
+ run_mon $dir a || return 1
+ ceph osd crush add-bucket default1 root
+ ceph osd crush add-bucket host1 host
+ ceph osd crush move host1 root=default1
+ ceph osd crush ls default1 | grep host1 || return 1
+ ceph osd crush ls default2 2>&1 | grep "Error ENOENT" || return 1
+}
+
+function TEST_crush_reject_empty() {
+ local dir=$1
+ run_mon $dir a --osd_pool_default_size=1 --mon_allow_pool_size_one=true || return 1
+ # should have at least one OSD
+ run_osd $dir 0 || return 1
+ create_rbd_pool || return 1
+
+ local empty_map=$dir/empty_map
+ :> $empty_map.txt
+ crushtool -c $empty_map.txt -o $empty_map.map || return 1
+ expect_failure $dir "Error EINVAL" \
+ ceph osd setcrushmap -i $empty_map.map || return 1
+}
+
+main osd-crush "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/osd-crush.sh"
+# End:
diff --git a/qa/standalone/mon/osd-df.sh b/qa/standalone/mon/osd-df.sh
new file mode 100755
index 000000000..962909fdb
--- /dev/null
+++ b/qa/standalone/mon/osd-df.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7113" # git grep '\<7113\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_osd_df() {
+ local dir=$1
+ setup $dir || return 1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ # normal case
+ ceph osd df --f json-pretty | grep osd.0 || return 1
+ ceph osd df --f json-pretty | grep osd.1 || return 1
+ ceph osd df --f json-pretty | grep osd.2 || return 1
+ ceph osd df --f json-pretty | grep osd.3 || return 1
+ ceph osd df --f json-pretty | grep osd.4 || return 1
+ ceph osd df --f json-pretty | grep osd.5 || return 1
+
+ # filter by device class
+ osd_class=$(ceph osd crush get-device-class 0)
+ ceph osd df class $osd_class --f json-pretty | grep 'osd.0' || return 1
+ # post-nautilus, the filter-type keyword is no longer required
+ ceph osd df $osd_class --f json-pretty | grep 'osd.0' || return 1
+ ceph osd crush rm-device-class 0 || return 1
+ ceph osd crush set-device-class aaa 0 || return 1
+ ceph osd df aaa --f json-pretty | grep 'osd.0' || return 1
+ ceph osd df aaa --f json-pretty | grep 'osd.1' && return 1
+ # reset osd.0's device class
+ ceph osd crush rm-device-class 0 || return 1
+ ceph osd crush set-device-class $osd_class 0 || return 1
+
+ # filter by crush node
+ ceph osd df osd.0 --f json-pretty | grep osd.0 || return 1
+ ceph osd df osd.0 --f json-pretty | grep osd.1 && return 1
+ ceph osd crush move osd.0 root=default host=foo || return 1
+ ceph osd crush move osd.1 root=default host=foo || return 1
+ ceph osd crush move osd.2 root=default host=foo || return 1
+ ceph osd crush move osd.3 root=default host=bar || return 1
+ ceph osd crush move osd.4 root=default host=bar || return 1
+ ceph osd crush move osd.5 root=default host=bar || return 1
+ ceph osd df tree foo --f json-pretty | grep foo || return 1
+ ceph osd df tree foo --f json-pretty | grep bar && return 1
+ ceph osd df foo --f json-pretty | grep osd.0 || return 1
+ ceph osd df foo --f json-pretty | grep osd.1 || return 1
+ ceph osd df foo --f json-pretty | grep osd.2 || return 1
+ ceph osd df foo --f json-pretty | grep osd.3 && return 1
+ ceph osd df foo --f json-pretty | grep osd.4 && return 1
+ ceph osd df foo --f json-pretty | grep osd.5 && return 1
+ ceph osd df tree bar --f json-pretty | grep bar || return 1
+ ceph osd df tree bar --f json-pretty | grep foo && return 1
+ ceph osd df bar --f json-pretty | grep osd.0 && return 1
+ ceph osd df bar --f json-pretty | grep osd.1 && return 1
+ ceph osd df bar --f json-pretty | grep osd.2 && return 1
+ ceph osd df bar --f json-pretty | grep osd.3 || return 1
+ ceph osd df bar --f json-pretty | grep osd.4 || return 1
+ ceph osd df bar --f json-pretty | grep osd.5 || return 1
+
+ # filter by pool
+ ceph osd crush rm-device-class all || return 1
+ ceph osd crush set-device-class nvme 0 1 3 4 || return 1
+ ceph osd crush rule create-replicated nvme-rule default host nvme || return 1
+ ceph osd pool create nvme-pool 12 12 nvme-rule || return 1
+ ceph osd df nvme-pool --f json-pretty | grep osd.0 || return 1
+ ceph osd df nvme-pool --f json-pretty | grep osd.1 || return 1
+ ceph osd df nvme-pool --f json-pretty | grep osd.2 && return 1
+ ceph osd df nvme-pool --f json-pretty | grep osd.3 || return 1
+ ceph osd df nvme-pool --f json-pretty | grep osd.4 || return 1
+ ceph osd df nvme-pool --f json-pretty | grep osd.5 && return 1
+
+ teardown $dir || return 1
+}
+
+main osd-df "$@"
diff --git a/qa/standalone/mon/osd-erasure-code-profile.sh b/qa/standalone/mon/osd-erasure-code-profile.sh
new file mode 100755
index 000000000..0afc5fc0b
--- /dev/null
+++ b/qa/standalone/mon/osd-erasure-code-profile.sh
@@ -0,0 +1,240 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7220" # git grep '\<7220\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_set() {
+ local dir=$1
+ local id=$2
+
+ run_mon $dir a || return 1
+
+ local profile=myprofile
+ #
+ # no key=value pairs : use the default configuration
+ #
+ ceph osd erasure-code-profile set $profile 2>&1 || return 1
+ ceph osd erasure-code-profile get $profile | \
+ grep plugin=jerasure || return 1
+ ceph osd erasure-code-profile rm $profile
+ #
+ # key=value pairs override the default
+ #
+ ceph osd erasure-code-profile set $profile \
+ key=value plugin=isa || return 1
+ ceph osd erasure-code-profile get $profile | \
+ grep -e key=value -e plugin=isa || return 1
+ #
+ # --force is required to override an existing profile
+ #
+ ! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
+ grep 'will not override' $dir/out || return 1
+ ceph osd erasure-code-profile set $profile key=other --force || return 1
+ ceph osd erasure-code-profile get $profile | \
+ grep key=other || return 1
+
+ ceph osd erasure-code-profile rm $profile # cleanup
+}
+
+function TEST_ls() {
+ local dir=$1
+ local id=$2
+
+ run_mon $dir a || return 1
+
+ local profile=myprofile
+ ! ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile set $profile 2>&1 || return 1
+ ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph --format xml osd erasure-code-profile ls | \
+ grep "<profile>$profile</profile>" || return 1
+
+ ceph osd erasure-code-profile rm $profile # cleanup
+}
+
+function TEST_rm() {
+ local dir=$1
+ local id=$2
+
+ run_mon $dir a || return 1
+
+ local profile=myprofile
+ ceph osd erasure-code-profile set $profile 2>&1 || return 1
+ ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile rm $profile || return 1
+ ! ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile rm WRONG 2>&1 | \
+ grep "WRONG does not exist" || return 1
+
+ ceph osd erasure-code-profile set $profile || return 1
+ create_pool poolname 12 12 erasure $profile || return 1
+ ! ceph osd erasure-code-profile rm $profile > $dir/out 2>&1 || return 1
+ grep "poolname.*using.*$profile" $dir/out || return 1
+ ceph osd pool delete poolname poolname --yes-i-really-really-mean-it || return 1
+ ceph osd erasure-code-profile rm $profile || return 1
+
+ ceph osd erasure-code-profile rm $profile # cleanup
+}
+
+function TEST_get() {
+ local dir=$1
+ local id=$2
+
+ run_mon $dir a || return 1
+
+ local default_profile=default
+ ceph osd erasure-code-profile get $default_profile | \
+ grep plugin=jerasure || return 1
+ ceph --format xml osd erasure-code-profile get $default_profile | \
+ grep '<plugin>jerasure</plugin>' || return 1
+ ! ceph osd erasure-code-profile get WRONG > $dir/out 2>&1 || return 1
+ grep -q "unknown erasure code profile 'WRONG'" $dir/out || return 1
+}
+
+function TEST_set_idempotent() {
+ local dir=$1
+ local id=$2
+
+ run_mon $dir a || return 1
+ #
+ # The default profile is set using a code path different from
+ # ceph osd erasure-code-profile set: verify that it is idempotent,
+ # as if it was using the same code path.
+ #
+ ceph osd erasure-code-profile set default k=2 m=2 2>&1 || return 1
+ local profile
+ #
+ # Because plugin=jerasure is the default, it uses a slightly
+ # different code path where defaults (m=1 for instance) are added
+ # implicitly.
+ #
+ profile=profileidempotent1
+ ! ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile set $profile k=2 crush-failure-domain=osd 2>&1 || return 1
+ ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile set $profile k=2 crush-failure-domain=osd 2>&1 || return 1
+ ceph osd erasure-code-profile rm $profile # cleanup
+
+ #
+ # In the general case the profile is stored exactly as it was set, so
+ # setting the same key=value pairs a second time must succeed (idempotent).
+ #
+ profile=profileidempotent2
+ ! ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 crush-failure-domain=osd 2>&1 || return 1
+ ceph osd erasure-code-profile ls | grep $profile || return 1
+ ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 crush-failure-domain=osd 2>&1 || return 1
+ ceph osd erasure-code-profile rm $profile # cleanup
+}
+
+function TEST_format_invalid() {
+ local dir=$1
+
+ local profile=profile
+ # osd_pool_default_erasure-code-profile is
+ # valid JSON but not of the expected type
+ run_mon $dir a \
+ --osd_pool_default_erasure-code-profile 1 || return 1
+ ! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1
+ cat $dir/out
+ grep 'must be a JSON object' $dir/out || return 1
+}
+
+function TEST_format_json() {
+ local dir=$1
+
+ # osd_pool_default_erasure-code-profile is JSON
+ expected='"plugin":"isa"'
+ run_mon $dir a \
+ --osd_pool_default_erasure-code-profile "{$expected}" || return 1
+ ceph --format json osd erasure-code-profile get default | \
+ grep "$expected" || return 1
+}
+
+function TEST_format_plain() {
+ local dir=$1
+
+ # osd_pool_default_erasure-code-profile is plain text
+ expected='"plugin":"isa"'
+ run_mon $dir a \
+ --osd_pool_default_erasure-code-profile "plugin=isa" || return 1
+ ceph --format json osd erasure-code-profile get default | \
+ grep "$expected" || return 1
+}
+
+function TEST_profile_k_sanity() {
+ local dir=$1
+ local profile=profile-sanity
+
+ run_mon $dir a || return 1
+
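+ # The lrc plugin requires k to be a multiple of (k + m) / l; with k=1, m=1
+ # and l=1 that ratio is 2, and k=1 is not a multiple of it, hence the
+ # expected failure below.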
+ expect_failure $dir 'k must be a multiple of (k + m) / l' \
+ ceph osd erasure-code-profile set $profile \
+ plugin=lrc \
+ l=1 \
+ k=1 \
+ m=1 || return 1
+
+ if erasure_code_plugin_exists isa ; then
+ expect_failure $dir 'k=1 must be >= 2' \
+ ceph osd erasure-code-profile set $profile \
+ plugin=isa \
+ k=1 \
+ m=1 || return 1
+ else
+ echo "SKIP because plugin isa has not been built"
+ fi
+
+ expect_failure $dir 'k=1 must be >= 2' \
+ ceph osd erasure-code-profile set $profile \
+ plugin=jerasure \
+ k=1 \
+ m=1 || return 1
+}
+
+function TEST_invalid_crush_failure_domain() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+
+ local profile=ec_profile
+ local crush_failure_domain=invalid_failure_domain
+
+ ! ceph osd erasure-code-profile set $profile k=4 m=2 crush-failure-domain=$crush_failure_domain 2>&1 || return 1
+}
+
+main osd-erasure-code-profile "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/osd-erasure-code-profile.sh"
+# End:
diff --git a/qa/standalone/mon/osd-pool-create.sh b/qa/standalone/mon/osd-pool-create.sh
new file mode 100755
index 000000000..6d2c5ad3e
--- /dev/null
+++ b/qa/standalone/mon/osd-pool-create.sh
@@ -0,0 +1,307 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2013, 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7105" # git grep '\<7105\>' : there must be only one
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ export CEPH_ARGS
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+# Before http://tracker.ceph.com/issues/8307 was fixed, the invalid profile was created anyway
+function TEST_erasure_invalid_profile() {
+ local dir=$1
+ run_mon $dir a || return 1
+ local poolname=pool_erasure
+ local notaprofile=not-a-valid-erasure-code-profile
+ ! ceph osd pool create $poolname 12 12 erasure $notaprofile || return 1
+ ! ceph osd erasure-code-profile ls | grep $notaprofile || return 1
+}
+
+function TEST_erasure_crush_rule() {
+ local dir=$1
+ run_mon $dir a || return 1
+ #
+ # choose the crush rule used with an erasure coded pool
+ #
+ local crush_rule=myrule
+ ! ceph osd crush rule ls | grep $crush_rule || return 1
+ ceph osd crush rule create-erasure $crush_rule
+ ceph osd crush rule ls | grep $crush_rule
+ local poolname
+ poolname=pool_erasure1
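+ # The replicated_rule created at bootstrap normally takes rule id 0, so the
+ # erasure rule created above is expected to get id 1, and no pool should
+ # reference it until the erasure pool below is created.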
+ ! ceph --format json osd dump | grep '"crush_rule":1' || return 1
+ ceph osd pool create $poolname 12 12 erasure default $crush_rule
+ ceph --format json osd dump | grep '"crush_rule":1' || return 1
+ #
+ # a crush rule by the name of the pool is implicitly created
+ #
+ poolname=pool_erasure2
+ ceph osd erasure-code-profile set myprofile
+ ceph osd pool create $poolname 12 12 erasure myprofile
+ ceph osd crush rule ls | grep $poolname || return 1
+ #
+ # a non-existent crush rule given as an argument is an error
+ # http://tracker.ceph.com/issues/9304
+ #
+ poolname=pool_erasure3
+ ! ceph osd pool create $poolname 12 12 erasure myprofile INVALIDRULE || return 1
+}
+
+function TEST_erasure_code_profile_default() {
+ local dir=$1
+ run_mon $dir a || return 1
+ ceph osd erasure-code-profile rm default || return 1
+ ! ceph osd erasure-code-profile ls | grep default || return 1
+ local poolname=pool_default
+ ceph osd pool create $poolname 12 12 erasure default
+ ceph osd erasure-code-profile ls | grep default || return 1
+}
+
+function TEST_erasure_crush_stripe_unit() {
+ local dir=$1
+ # the default stripe unit is used to initialize the pool
+ run_mon $dir a --public-addr $CEPH_MON
+ stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit)
+ eval local $(ceph osd erasure-code-profile get default | grep k=)
+ stripe_width=$((stripe_unit * k))
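+ # stripe_width should come out as stripe_unit * k; with the shipped
+ # defaults (stripe unit 4096, k=2 in the default profile) that is 8192.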
+ ceph osd pool create pool_erasure 12 12 erasure
+ ceph --format json osd dump | tee $dir/osd.json
+ grep '"stripe_width":'$stripe_width $dir/osd.json > /dev/null || return 1
+}
+
+function TEST_erasure_crush_stripe_unit_padded() {
+ local dir=$1
+ # setting osd_pool_erasure_code_stripe_unit modifies the stripe_width
+ # and it is padded as required by the default plugin
+ profile+=" plugin=jerasure"
+ profile+=" technique=reed_sol_van"
+ k=4
+ profile+=" k=$k"
+ profile+=" m=2"
+ actual_stripe_unit=2048
+ desired_stripe_unit=$((actual_stripe_unit - 1))
+ actual_stripe_width=$((actual_stripe_unit * k))
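+ # The requested stripe unit of 2047 bytes does not satisfy the plugin's
+ # alignment requirement, so it is expected to be padded up to 2048, giving
+ # stripe_width = 2048 * 4 = 8192.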
+ run_mon $dir a \
+ --osd_pool_erasure_code_stripe_unit $desired_stripe_unit \
+ --osd_pool_default_erasure_code_profile "$profile" || return 1
+ ceph osd pool create pool_erasure 12 12 erasure
+ ceph osd dump | tee $dir/osd.json
+ grep "stripe_width $actual_stripe_width" $dir/osd.json > /dev/null || return 1
+}
+
+function TEST_erasure_code_pool() {
+ local dir=$1
+ run_mon $dir a || return 1
+ ceph --format json osd dump > $dir/osd.json
+ local expected='"erasure_code_profile":"default"'
+ ! grep "$expected" $dir/osd.json || return 1
+ ceph osd pool create erasurecodes 12 12 erasure
+ ceph --format json osd dump | tee $dir/osd.json
+ grep "$expected" $dir/osd.json > /dev/null || return 1
+
+ ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \
+ grep 'already exists' || return 1
+ ceph osd pool create erasurecodes 12 12 2>&1 | \
+ grep 'cannot change to type replicated' || return 1
+}
+
+function TEST_replicated_pool_with_rule() {
+ local dir=$1
+ run_mon $dir a
+ local rule=rule0
+ local root=host1
+ ceph osd crush add-bucket $root host
+ local failure_domain=osd
+ local poolname=mypool
+ ceph osd crush rule create-simple $rule $root $failure_domain || return 1
+ ceph osd crush rule ls | grep $rule
+ ceph osd pool create $poolname 12 12 replicated $rule || return 1
+ rule_id=`ceph osd crush rule dump $rule | grep "rule_id" | awk -F[' ':,] '{print $4}'`
+ ceph osd pool get $poolname crush_rule 2>&1 | \
+ grep "crush_rule: $rule_id" || return 1
+ #non-existent crush rule
+ ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \
+ grep "doesn't exist" || return 1
+}
+
+function TEST_erasure_code_pool_lrc() {
+ local dir=$1
+ run_mon $dir a || return 1
+
+ ceph osd erasure-code-profile set LRCprofile \
+ plugin=lrc \
+ mapping=DD_ \
+ layers='[ [ "DDc", "" ] ]' || return 1
+
+ ceph --format json osd dump > $dir/osd.json
+ local expected='"erasure_code_profile":"LRCprofile"'
+ local poolname=erasurecodes
+ ! grep "$expected" $dir/osd.json || return 1
+ ceph osd pool create $poolname 12 12 erasure LRCprofile
+ ceph --format json osd dump | tee $dir/osd.json
+ grep "$expected" $dir/osd.json > /dev/null || return 1
+ ceph osd crush rule ls | grep $poolname || return 1
+}
+
+function TEST_replicated_pool() {
+ local dir=$1
+ run_mon $dir a || return 1
+ ceph osd pool create replicated 12 12 replicated replicated_rule || return 1
+ ceph osd pool create replicated 12 12 replicated replicated_rule 2>&1 | \
+ grep 'already exists' || return 1
+ # default is replicated
+ ceph osd pool create replicated1 12 12 || return 1
+ # default is replicated, pgp_num = pg_num
+ ceph osd pool create replicated2 12 || return 1
+ ceph osd pool create replicated 12 12 erasure 2>&1 | \
+ grep 'cannot change to type erasure' || return 1
+}
+
+function TEST_no_pool_delete() {
+ local dir=$1
+ run_mon $dir a || return 1
+ ceph osd pool create foo 1 || return 1
+ ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1
+ ! ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
+ ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1
+ ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
+}
+
+function TEST_utf8_cli() {
+ local dir=$1
+ run_mon $dir a || return 1
+ # Hopefully it's safe to include literal UTF-8 characters to test
+ # the fix for http://tracker.ceph.com/issues/7387. If it turns out
+ # to not be OK (when is the default encoding *not* UTF-8?), maybe
+ # the character '黄' can be replaced with the escape $'\xe9\xbb\x84'
+ OLDLANG="$LANG"
+ export LANG=en_US.UTF-8
+ ceph osd pool create 黄 16 || return 1
+ ceph osd lspools 2>&1 | \
+ grep "黄" || return 1
+ ceph -f json-pretty osd dump | \
+ python3 -c "import json; import sys; json.load(sys.stdin)" || return 1
+ ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it
+ export LANG="$OLDLANG"
+}
+
+function check_pool_priority() {
+ local dir=$1
+ shift
+ local pools=$1
+ shift
+ local spread="$1"
+ shift
+ local results="$1"
+
+ setup $dir || return 1
+
+ EXTRA_OPTS="--debug_allow_any_pool_priority=true"
+ export EXTRA_OPTS
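+ # debug_allow_any_pool_priority allows recovery priorities outside the
+ # normally accepted range to be set, so that the conversion done by the
+ # restarted mon can be exercised.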
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ # Include the i=0 case so a recovery_priority of 0 (the unset value) is exercised too
+ for i in $(seq 0 $pools)
+ do
+ num=$(expr $i + 1)
+ ceph osd pool create test${num} 1 1
+ done
+
+ wait_for_clean || return 1
+ for i in $(seq 0 $pools)
+ do
+ num=$(expr $i + 1)
+ ceph osd pool set test${num} recovery_priority $(expr $i \* $spread)
+ done
+
+ #grep "recovery_priority.*pool set" out/mon.a.log
+
+ ceph osd dump
+
+ # Restart everything so mon converts the priorities
+ kill_daemons
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ activate_osd $dir 0 || return 1
+ activate_osd $dir 1 || return 1
+ activate_osd $dir 2 || return 1
+ sleep 5
+
+ grep convert $dir/mon.a.log
+ ceph osd dump
+
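+ # Walk every pool in the osd dump and compare its stored recovery_priority
+ # against the expected list: the result strings passed in by the callers
+ # are the priorities after the restarted mon has converted out-of-range
+ # values back into the supported window (all of them fall within -10..10).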
+ pos=1
+ for i in $(ceph osd dump | grep ^pool | sed 's/.*recovery_priority //' | awk '{ print $1 }')
+ do
+ result=$(echo $results | awk "{ print \$${pos} }")
+ # A recovery_priority of 0 means unset, so the pool line carries no
+ # recovery_priority field and the sed/awk pipeline above yields "pool"
+ if test "$result" = "0"
+ then
+ result="pool"
+ fi
+ test "$result" = "$i" || return 1
+ pos=$(expr $pos + 1)
+ done
+}
+
+function TEST_pool_pos_only_prio() {
+ local dir=$1
+ check_pool_priority $dir 20 5 "0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10" || return 1
+}
+
+function TEST_pool_neg_only_prio() {
+ local dir=$1
+ check_pool_priority $dir 20 -5 "0 0 -1 -1 -2 -2 -3 -3 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 -10" || return 1
+}
+
+function TEST_pool_both_prio() {
+ local dir=$1
+ check_pool_priority $dir 20 "5 - 50" "-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10" || return 1
+}
+
+function TEST_pool_both_prio_no_neg() {
+ local dir=$1
+ check_pool_priority $dir 20 "2 - 4" "-4 -2 0 0 1 1 2 2 3 3 4 5 5 6 6 7 7 8 8 9 10" || return 1
+}
+
+function TEST_pool_both_prio_no_pos() {
+ local dir=$1
+ check_pool_priority $dir 20 "2 - 36" "-10 -9 -8 -8 -7 -7 -6 -6 -5 -5 -4 -3 -3 -2 -2 -1 -1 0 0 2 4" || return 1
+}
+
+
+main osd-pool-create "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/mon/osd-pool-create.sh"
+# End:
diff --git a/qa/standalone/mon/osd-pool-df.sh b/qa/standalone/mon/osd-pool-df.sh
new file mode 100755
index 000000000..d2b80ec72
--- /dev/null
+++ b/qa/standalone/mon/osd-pool-df.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Tencent <contact@tencent.com>
+#
+# Author: Chang Liu <liuchang0812@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7113" # git grep '\<7113\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_ceph_df() {
+ local dir=$1
+ setup $dir || return 1
+
+ run_mon $dir a || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+ run_mgr $dir x || return 1
+
+ profile+=" plugin=jerasure"
+ profile+=" technique=reed_sol_van"
+ profile+=" k=4"
+ profile+=" m=2"
+ profile+=" crush-failure-domain=osd"
+
+ ceph osd erasure-code-profile set ec42profile ${profile}
+
+ local rep_poolname=testcephdf_replicate
+ local ec_poolname=testcephdf_erasurecode
+ create_pool $rep_poolname 6 6 replicated
+ create_pool $ec_poolname 6 6 erasure ec42profile
+ flush_pg_stats
+
+ local global_avail=`ceph df -f json | jq '.stats.total_avail_bytes'`
+ local rep_avail=`ceph df -f json | jq '.pools | map(select(.name == "'$rep_poolname'"))[0].stats.max_avail'`
+ local ec_avail=`ceph df -f json | jq '.pools | map(select(.name == "'$ec_poolname'"))[0].stats.max_avail'`
+
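+ # With the default replicated size of 3 the pool can use at most roughly a
+ # third of the raw space, and the 4+2 erasure profile carries a 1.5x
+ # overhead, hence the two checks below.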
+ echo "${global_avail} >= ${rep_avail}*3" | bc || return 1
+ echo "${global_avail} >= ${ec_avail}*1.5" | bc || return 1
+
+ ceph osd pool delete $rep_poolname $rep_poolname --yes-i-really-really-mean-it
+ ceph osd pool delete $ec_poolname $ec_poolname --yes-i-really-really-mean-it
+ ceph osd erasure-code-profile rm ec42profile
+ teardown $dir || return 1
+}
+
+main osd-pool-df "$@"
diff --git a/qa/standalone/mon/test_pool_quota.sh b/qa/standalone/mon/test_pool_quota.sh
new file mode 100755
index 000000000..b87ec2232
--- /dev/null
+++ b/qa/standalone/mon/test_pool_quota.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+#
+# Generic pool quota test
+#
+
+# Includes
+
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:17108" # git grep '\<17108\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ $func $dir || return 1
+ done
+}
+
+function TEST_pool_quota() {
+ local dir=$1
+ setup $dir || return 1
+
+ run_mon $dir a || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ local poolname=testquota
+ create_pool $poolname 20
+ local objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
+ local bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
+
+ echo $objects
+ echo $bytes
+ if [ "$objects" != 'N/A' ] || [ "$bytes" != 'N/A' ] ;
+ then
+ return 1
+ fi
+
+ ceph osd pool set-quota $poolname max_objects 1000
+ ceph osd pool set-quota $poolname max_bytes 1024
+
+ objects=`ceph df detail | grep -w $poolname|awk '{print $3}'`
+ bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'`
+
+ if [ "$objects" != '1000' ] || [ "$bytes" != '1K' ] ;
+ then
+ return 1
+ fi
+
+ ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it
+ teardown $dir || return 1
+}
+
+main testpoolquota "$@"