author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /qa/standalone/osd
parent     Initial commit. (diff)
Adding upstream version 14.2.21. (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/standalone/osd')
-rwxr-xr-x  qa/standalone/osd/bad-inc-map.sh                  62
-rwxr-xr-x  qa/standalone/osd/divergent-priors.sh            840
-rwxr-xr-x  qa/standalone/osd/ec-error-rollforward.sh         66
-rwxr-xr-x  qa/standalone/osd/osd-backfill-prio.sh           519
-rwxr-xr-x  qa/standalone/osd/osd-backfill-recovery-log.sh   136
-rwxr-xr-x  qa/standalone/osd/osd-backfill-space.sh         1175
-rwxr-xr-x  qa/standalone/osd/osd-backfill-stats.sh          753
-rwxr-xr-x  qa/standalone/osd/osd-bench.sh                    96
-rwxr-xr-x  qa/standalone/osd/osd-bluefs-volume-ops.sh       346
-rwxr-xr-x  qa/standalone/osd/osd-config.sh                   97
-rwxr-xr-x  qa/standalone/osd/osd-copy-from.sh                68
-rwxr-xr-x  qa/standalone/osd/osd-dup.sh                      83
-rwxr-xr-x  qa/standalone/osd/osd-fast-mark-down.sh          116
-rwxr-xr-x  qa/standalone/osd/osd-force-create-pg.sh          52
-rwxr-xr-x  qa/standalone/osd/osd-markdown.sh                131
-rwxr-xr-x  qa/standalone/osd/osd-reactivate.sh               56
-rwxr-xr-x  qa/standalone/osd/osd-recovery-prio.sh           515
-rwxr-xr-x  qa/standalone/osd/osd-recovery-space.sh          175
-rwxr-xr-x  qa/standalone/osd/osd-recovery-stats.sh          512
-rwxr-xr-x  qa/standalone/osd/osd-rep-recov-eio.sh           476
-rwxr-xr-x  qa/standalone/osd/osd-reuse-id.sh                 52
-rwxr-xr-x  qa/standalone/osd/pg-split-merge.sh              204
-rwxr-xr-x  qa/standalone/osd/repro_long_log.sh              152
23 files changed, 6682 insertions, 0 deletions
diff --git a/qa/standalone/osd/bad-inc-map.sh b/qa/standalone/osd/bad-inc-map.sh
new file mode 100755
index 00000000..cc3cf27c
--- /dev/null
+++ b/qa/standalone/osd/bad-inc-map.sh
@@ -0,0 +1,62 @@
+#!/usr/bin/env bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+mon_port=$(get_unused_port)
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:$mon_port"
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ set -e
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_bad_inc_map() {
+ local dir=$1
+
+ run_mon $dir a
+ run_mgr $dir x
+ run_osd $dir 0
+ run_osd $dir 1
+ run_osd $dir 2
+
+ ceph config set osd.2 osd_inject_bad_map_crc_probability 1
+
+ # osd map churn
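+ # note: each pool create / min_size change results in a new osdmap epoch,
+ # exercising the incremental-map crc failure injected on osd.2 above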
+ create_pool foo 8
+ ceph osd pool set foo min_size 1
+ ceph osd pool set foo min_size 2
+
+ sleep 5
+
+ # make sure all the OSDs are still up
+ TIMEOUT=10 wait_for_osd up 0
+ TIMEOUT=10 wait_for_osd up 1
+ TIMEOUT=10 wait_for_osd up 2
+
+ # check for the signature in the log
+ grep "injecting map crc failure" $dir/osd.2.log || return 1
+ grep "bailing because last" $dir/osd.2.log || return 1
+
+ echo success
+
+ delete_pool foo
+ kill_daemons $dir || return 1
+}
+
+main bad-inc-map "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh bad-inc-map.sh"
+# End:
diff --git a/qa/standalone/osd/divergent-priors.sh b/qa/standalone/osd/divergent-priors.sh
new file mode 100755
index 00000000..dec0e7ad
--- /dev/null
+++ b/qa/standalone/osd/divergent-priors.sh
@@ -0,0 +1,840 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2019 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # This should be a multiple of 6
+ export loglen=12
+ export divisor=3
+ export trim=$(expr $loglen / 2)
+ export DIVERGENT_WRITE=$(expr $trim / $divisor)
+ export DIVERGENT_REMOVE=$(expr $trim / $divisor)
+ export DIVERGENT_CREATE=$(expr $trim / $divisor)
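+ # With the defaults above: trim = 12 / 2 = 6 and DIVERGENT_WRITE =
+ # DIVERGENT_REMOVE = DIVERGENT_CREATE = 6 / 3 = 2 (hence the multiple of 6).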
+ export poolname=test
+ export testobjects=100
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ # so we will not force auth_log_shard to be acting_primary
+ CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 "
+ CEPH_ARGS+="--osd_debug_pg_log_writeout=true "
+ CEPH_ARGS+="--osd_min_pg_log_entries=$loglen --osd_max_pg_log_entries=$loglen --osd_pg_log_trim_min=$trim "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
+# Special case divergence test
+# Test handling of divergent entries with prior_version
+# prior to log_tail
+# based on qa/tasks/divergent_prior.py
+function TEST_divergent() {
+ local dir=$1
+
+ # something that is always there
+ local dummyfile='/etc/fstab'
+ local dummyfile2='/etc/resolv.conf'
+
+ local num_osds=3
+ local osds="$(seq 0 $(expr $num_osds - 1))"
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ for i in $osds
+ do
+ run_osd $dir $i || return 1
+ done
+
+ ceph osd set noout
+ ceph osd set noin
+ ceph osd set nodown
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 3
+ ceph osd pool set $poolname min_size 2
+
+ flush_pg_stats || return 1
+ wait_for_clean || return 1
+
+ # determine primary
+ local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
+ echo "primary and soon to be divergent is $divergent"
+ ceph pg dump pgs
+ local non_divergent=""
+ for i in $osds
+ do
+ if [ "$i" = "$divergent" ]; then
+ continue
+ fi
+ non_divergent="$non_divergent $i"
+ done
+
+ echo "writing initial objects"
+ # write a bunch of objects
+ for i in $(seq 1 $testobjects)
+ do
+ rados -p $poolname put existing_$i $dummyfile
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ local pgid=$(get_pg $poolname existing_1)
+
+ # blackhole non_divergent
+ echo "blackholing osds $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
+ done
+
+ local case5=$testobjects
+ local case3=$(expr $testobjects - 1)
+ # Write some soon to be divergent
+ echo 'writing divergent object'
+ rados -p $poolname put existing_$case5 $dummyfile &
+ echo 'create missing divergent object'
+ inject_eio rep data $poolname existing_$case3 $dir 0 || return 1
+ rados -p $poolname get existing_$case3 $dir/existing &
+ sleep 10
+ killall -9 rados
+
+ # kill all the osds but leave divergent in
+ echo 'killing all the osds'
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd || return 1
+ for i in $osds
+ do
+ ceph osd down osd.$i
+ done
+ for i in $non_divergent
+ do
+ ceph osd out osd.$i
+ done
+
+ # bring up non-divergent
+ echo "bringing up non_divergent $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ activate_osd $dir $i || return 1
+ done
+ for i in $non_divergent
+ do
+ ceph osd in osd.$i
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # write 1 non-divergent object (ensure that old divergent one is divergent)
+ objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
+ echo "writing non-divergent object $objname"
+ ceph pg dump pgs
+ rados -p $poolname put $objname $dummyfile2
+
+ # ensure no recovery of up osds first
+ echo 'delay recovery'
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
+ done
+
+ # bring in our divergent friend
+ echo "revive divergent $divergent"
+ ceph pg dump pgs
+ ceph osd set noup
+ activate_osd $dir $divergent
+ sleep 5
+
+ echo 'delay recovery divergent'
+ ceph pg dump pgs
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
+
+ ceph osd unset noup
+
+ wait_for_osd up 0
+ wait_for_osd up 1
+ wait_for_osd up 2
+
+ ceph pg dump pgs
+ echo 'wait for peering'
+ ceph pg dump pgs
+ rados -p $poolname put foo $dummyfile
+
+ echo "killing divergent $divergent"
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd.$divergent
+ #_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid
+ echo "reviving divergent $divergent"
+ ceph pg dump pgs
+ activate_osd $dir $divergent
+
+ sleep 20
+
+ echo "allowing recovery"
+ ceph pg dump pgs
+ # Set osd_recovery_delay_start back to 0 and kick the queue
+ for i in $osds
+ do
+ ceph tell osd.$i debug kick_recovery_wq 0
+ done
+
+ echo 'reading divergent objects'
+ ceph pg dump pgs
+ for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
+ do
+ rados -p $poolname get existing_$i $dir/existing || return 1
+ done
+ rm -f $dir/existing
+
+ grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
+ # Check for _merge_object_divergent_entries for case #5
+ if ! grep -q "_merge_object_divergent_entries.*cannot roll back, removing and adding to missing" $(find $dir -name '*osd*log')
+ then
+ echo failure
+ return 1
+ fi
+ echo "success"
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+function TEST_divergent_ec() {
+ local dir=$1
+
+ # something that is always there
+ local dummyfile='/etc/fstab'
+ local dummyfile2='/etc/resolv.conf'
+
+ local num_osds=3
+ local osds="$(seq 0 $(expr $num_osds - 1))"
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ for i in $osds
+ do
+ run_osd $dir $i || return 1
+ done
+
+ ceph osd set noout
+ ceph osd set noin
+ ceph osd set nodown
+ create_ec_pool $poolname true k=2 m=1 || return 1
+
+ flush_pg_stats || return 1
+ wait_for_clean || return 1
+
+ # determine primary
+ local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
+ echo "primary and soon to be divergent is $divergent"
+ ceph pg dump pgs
+ local non_divergent=""
+ for i in $osds
+ do
+ if [ "$i" = "$divergent" ]; then
+ continue
+ fi
+ non_divergent="$non_divergent $i"
+ done
+
+ echo "writing initial objects"
+ # write a bunch of objects
+ for i in $(seq 1 $testobjects)
+ do
+ rados -p $poolname put existing_$i $dummyfile
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ local pgid=$(get_pg $poolname existing_1)
+
+ # blackhole non_divergent
+ echo "blackholing osds $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
+ done
+
+ # Write some soon to be divergent
+ echo 'writing divergent object'
+ rados -p $poolname put existing_$testobjects $dummyfile2 &
+ sleep 1
+ rados -p $poolname put existing_$testobjects $dummyfile &
+ rados -p $poolname mksnap snap1
+ rados -p $poolname put existing_$(expr $testobjects - 1) $dummyfile &
+ sleep 10
+ killall -9 rados
+
+ # kill all the osds but leave divergent in
+ echo 'killing all the osds'
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd || return 1
+ for i in $osds
+ do
+ ceph osd down osd.$i
+ done
+ for i in $non_divergent
+ do
+ ceph osd out osd.$i
+ done
+
+ # bring up non-divergent
+ echo "bringing up non_divergent $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ activate_osd $dir $i || return 1
+ done
+ for i in $non_divergent
+ do
+ ceph osd in osd.$i
+ done
+
+ sleep 5
+ #WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # write 1 non-divergent object (ensure that old divergent one is divergent)
+ objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
+ echo "writing non-divergent object $objname"
+ ceph pg dump pgs
+ rados -p $poolname put $objname $dummyfile2
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # Dump logs
+ for i in $non_divergent
+ do
+ kill_daemons $dir KILL osd.$i || return 1
+ _objectstore_tool_nodown $dir $i --op log --pgid $pgid
+ activate_osd $dir $i || return 1
+ done
+ _objectstore_tool_nodown $dir $divergent --op log --pgid $pgid
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # ensure no recovery of up osds first
+ echo 'delay recovery'
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
+ done
+
+ # bring in our divergent friend
+ echo "revive divergent $divergent"
+ ceph pg dump pgs
+ ceph osd set noup
+ activate_osd $dir $divergent
+ sleep 5
+
+ echo 'delay recovery divergent'
+ ceph pg dump pgs
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
+
+ ceph osd unset noup
+
+ wait_for_osd up 0
+ wait_for_osd up 1
+ wait_for_osd up 2
+
+ ceph pg dump pgs
+ echo 'wait for peering'
+ ceph pg dump pgs
+ rados -p $poolname put foo $dummyfile
+
+ echo "killing divergent $divergent"
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd.$divergent
+ #_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid
+ echo "reviving divergent $divergent"
+ ceph pg dump pgs
+ activate_osd $dir $divergent
+
+ sleep 20
+
+ echo "allowing recovery"
+ ceph pg dump pgs
+ # Set osd_recovery_delay_start back to 0 and kick the queue
+ for i in $osds
+ do
+ ceph tell osd.$i debug kick_recovery_wq 0
+ done
+
+ echo 'reading divergent objects'
+ ceph pg dump pgs
+ for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
+ do
+ rados -p $poolname get existing_$i $dir/existing || return 1
+ done
+ rm -f $dir/existing
+
+ grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
+ # Check for _merge_object_divergent_entries for case #3
+ # XXX: Not reproducing this case
+# if ! grep -q "_merge_object_divergent_entries.* missing, .* adjusting" $(find $dir -name '*osd*log')
+# then
+# echo failure
+# return 1
+# fi
+ # Check for _merge_object_divergent_entries for case #4
+ if ! grep -q "_merge_object_divergent_entries.*rolled back" $(find $dir -name '*osd*log')
+ then
+ echo failure
+ return 1
+ fi
+ echo "success"
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# Special case divergence test with ceph-objectstore-tool export/remove/import
+# Test handling of divergent entries with prior_version
+# prior to log_tail and a ceph-objectstore-tool export/import
+# based on qa/tasks/divergent_prior2.py
+function TEST_divergent_2() {
+ local dir=$1
+
+ # something that is always there
+ local dummyfile='/etc/fstab'
+ local dummyfile2='/etc/resolv.conf'
+
+ local num_osds=3
+ local osds="$(seq 0 $(expr $num_osds - 1))"
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ for i in $osds
+ do
+ run_osd $dir $i || return 1
+ done
+
+ ceph osd set noout
+ ceph osd set noin
+ ceph osd set nodown
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 3
+ ceph osd pool set $poolname min_size 2
+
+ flush_pg_stats || return 1
+ wait_for_clean || return 1
+
+ # determine primary
+ local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
+ echo "primary and soon to be divergent is $divergent"
+ ceph pg dump pgs
+ local non_divergent=""
+ for i in $osds
+ do
+ if [ "$i" = "$divergent" ]; then
+ continue
+ fi
+ non_divergent="$non_divergent $i"
+ done
+
+ echo "writing initial objects"
+ # write a bunch of objects
+ for i in $(seq 1 $testobjects)
+ do
+ rados -p $poolname put existing_$i $dummyfile
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ local pgid=$(get_pg $poolname existing_1)
+
+ # blackhole non_divergent
+ echo "blackholing osds $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
+ done
+
+ # Do some creates to hit case 2
+ echo 'create new divergent objects'
+ for i in $(seq 1 $DIVERGENT_CREATE)
+ do
+ rados -p $poolname create newobject_$i &
+ done
+ # Write some soon to be divergent
+ echo 'writing divergent objects'
+ for i in $(seq 1 $DIVERGENT_WRITE)
+ do
+ rados -p $poolname put existing_$i $dummyfile2 &
+ done
+ # Remove some soon to be divergent
+ echo 'remove divergent objects'
+ for i in $(seq 1 $DIVERGENT_REMOVE)
+ do
+ rmi=$(expr $i + $DIVERGENT_WRITE)
+ rados -p $poolname rm existing_$rmi &
+ done
+ sleep 10
+ killall -9 rados
+
+ # kill all the osds but leave divergent in
+ echo 'killing all the osds'
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd || return 1
+ for i in $osds
+ do
+ ceph osd down osd.$i
+ done
+ for i in $non_divergent
+ do
+ ceph osd out osd.$i
+ done
+
+ # bring up non-divergent
+ echo "bringing up non_divergent $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ activate_osd $dir $i || return 1
+ done
+ for i in $non_divergent
+ do
+ ceph osd in osd.$i
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # write 1 non-divergent object (ensure that old divergent one is divergent)
+ objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
+ echo "writing non-divergent object $objname"
+ ceph pg dump pgs
+ rados -p $poolname put $objname $dummyfile2
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # ensure no recovery of up osds first
+ echo 'delay recovery'
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
+ done
+
+ # bring in our divergent friend
+ echo "revive divergent $divergent"
+ ceph pg dump pgs
+ ceph osd set noup
+ activate_osd $dir $divergent
+ sleep 5
+
+ echo 'delay recovery divergent'
+ ceph pg dump pgs
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
+
+ ceph osd unset noup
+
+ wait_for_osd up 0
+ wait_for_osd up 1
+ wait_for_osd up 2
+
+ ceph pg dump pgs
+ echo 'wait for peering'
+ ceph pg dump pgs
+ rados -p $poolname put foo $dummyfile
+
+ # At this point the divergent_priors should have been detected
+
+ echo "killing divergent $divergent"
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd.$divergent
+
+ # export a pg
+ expfile=$dir/exp.$$.out
+ _objectstore_tool_nodown $dir $divergent --op export-remove --pgid $pgid --file $expfile
+ _objectstore_tool_nodown $dir $divergent --op import --file $expfile
+
+ echo "reviving divergent $divergent"
+ ceph pg dump pgs
+ activate_osd $dir $divergent
+ wait_for_osd up $divergent
+
+ sleep 20
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) dump_ops_in_flight
+
+ echo "allowing recovery"
+ ceph pg dump pgs
+ # Set osd_recovery_delay_start back to 0 and kick the queue
+ for i in $osds
+ do
+ ceph tell osd.$i debug kick_recovery_wq 0
+ done
+
+ echo 'reading divergent objects'
+ ceph pg dump pgs
+ for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
+ do
+ rados -p $poolname get existing_$i $dir/existing || return 1
+ done
+ for i in $(seq 1 $DIVERGENT_CREATE)
+ do
+ rados -p $poolname get newobject_$i $dir/existing
+ done
+ rm -f $dir/existing
+
+ grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
+ # Check for _merge_object_divergent_entries for case #1
+ if ! grep -q "_merge_object_divergent_entries: more recent entry found:" $(find $dir -name '*osd*log')
+ then
+ echo failure
+ return 1
+ fi
+ # Check for _merge_object_divergent_entries for case #2
+ if ! grep -q "_merge_object_divergent_entries.*prior_version or op type indicates creation" $(find $dir -name '*osd*log')
+ then
+ echo failure
+ return 1
+ fi
+ echo "success"
+
+ rm $dir/$expfile
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# this is the same as case _2 above, except we enable pg autoscaling in order
+# to reproduce https://tracker.ceph.com/issues/41816
+function TEST_divergent_3() {
+ local dir=$1
+
+ # something that is always there
+ local dummyfile='/etc/fstab'
+ local dummyfile2='/etc/resolv.conf'
+
+ local num_osds=3
+ local osds="$(seq 0 $(expr $num_osds - 1))"
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ for i in $osds
+ do
+ run_osd $dir $i || return 1
+ done
+
+ ceph osd set noout
+ ceph osd set noin
+ ceph osd set nodown
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 3
+ ceph osd pool set $poolname min_size 2
+
+ # reproduce https://tracker.ceph.com/issues/41816
+ ceph osd pool set $poolname pg_autoscale_mode on
+
+ flush_pg_stats || return 1
+ wait_for_clean || return 1
+
+ # determine primary
+ local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')"
+ echo "primary and soon to be divergent is $divergent"
+ ceph pg dump pgs
+ local non_divergent=""
+ for i in $osds
+ do
+ if [ "$i" = "$divergent" ]; then
+ continue
+ fi
+ non_divergent="$non_divergent $i"
+ done
+
+ echo "writing initial objects"
+ # write a bunch of objects
+ for i in $(seq 1 $testobjects)
+ do
+ rados -p $poolname put existing_$i $dummyfile
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ local pgid=$(get_pg $poolname existing_1)
+
+ # blackhole non_divergent
+ echo "blackholing osds $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1
+ done
+
+ # Do some creates to hit case 2
+ echo 'create new divergent objects'
+ for i in $(seq 1 $DIVERGENT_CREATE)
+ do
+ rados -p $poolname create newobject_$i &
+ done
+ # Write some soon to be divergent
+ echo 'writing divergent objects'
+ for i in $(seq 1 $DIVERGENT_WRITE)
+ do
+ rados -p $poolname put existing_$i $dummyfile2 &
+ done
+ # Remove some soon to be divergent
+ echo 'remove divergent objects'
+ for i in $(seq 1 $DIVERGENT_REMOVE)
+ do
+ rmi=$(expr $i + $DIVERGENT_WRITE)
+ rados -p $poolname rm existing_$rmi &
+ done
+ sleep 10
+ killall -9 rados
+
+ # kill all the osds but leave divergent in
+ echo 'killing all the osds'
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd || return 1
+ for i in $osds
+ do
+ ceph osd down osd.$i
+ done
+ for i in $non_divergent
+ do
+ ceph osd out osd.$i
+ done
+
+ # bring up non-divergent
+ echo "bringing up non_divergent $non_divergent"
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ activate_osd $dir $i || return 1
+ done
+ for i in $non_divergent
+ do
+ ceph osd in osd.$i
+ done
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # write 1 non-divergent object (ensure that old divergent one is divergent)
+ objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)"
+ echo "writing non-divergent object $objname"
+ ceph pg dump pgs
+ rados -p $poolname put $objname $dummyfile2
+
+ WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean
+
+ # ensure no recovery of up osds first
+ echo 'delay recovery'
+ ceph pg dump pgs
+ for i in $non_divergent
+ do
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000
+ done
+
+ # bring in our divergent friend
+ echo "revive divergent $divergent"
+ ceph pg dump pgs
+ ceph osd set noup
+ activate_osd $dir $divergent
+ sleep 5
+
+ echo 'delay recovery divergent'
+ ceph pg dump pgs
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000
+
+ ceph osd unset noup
+
+ wait_for_osd up 0
+ wait_for_osd up 1
+ wait_for_osd up 2
+
+ ceph pg dump pgs
+ echo 'wait for peering'
+ ceph pg dump pgs
+ rados -p $poolname put foo $dummyfile
+
+ # At this point the divergent_priors should have been detected
+
+ echo "killing divergent $divergent"
+ ceph pg dump pgs
+ kill_daemons $dir KILL osd.$divergent
+
+ # export a pg
+ expfile=$dir/exp.$$.out
+ _objectstore_tool_nodown $dir $divergent --op export-remove --pgid $pgid --file $expfile
+ _objectstore_tool_nodown $dir $divergent --op import --file $expfile
+
+ echo "reviving divergent $divergent"
+ ceph pg dump pgs
+ activate_osd $dir $divergent
+ wait_for_osd up $divergent
+
+ sleep 20
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) dump_ops_in_flight
+
+ echo "allowing recovery"
+ ceph pg dump pgs
+ # Set osd_recovery_delay_start back to 0 and kick the queue
+ for i in $osds
+ do
+ ceph tell osd.$i debug kick_recovery_wq 0
+ done
+
+ echo 'reading divergent objects'
+ ceph pg dump pgs
+ for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE))
+ do
+ rados -p $poolname get existing_$i $dir/existing || return 1
+ done
+ for i in $(seq 1 $DIVERGENT_CREATE)
+ do
+ rados -p $poolname get newobject_$i $dir/existing
+ done
+ rm -f $dir/existing
+
+ grep _merge_object_divergent_entries $(find $dir -name '*osd*log')
+ # Check for _merge_object_divergent_entries for case #1
+ if ! grep -q "_merge_object_divergent_entries: more recent entry found:" $(find $dir -name '*osd*log')
+ then
+ echo failure
+ return 1
+ fi
+ # Check for _merge_object_divergent_entries for case #2
+ if ! grep -q "_merge_object_divergent_entries.*prior_version or op type indicates creation" $(find $dir -name '*osd*log')
+ then
+ echo failure
+ return 1
+ fi
+ echo "success"
+
+ rm $dir/$expfile
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+main divergent-priors "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh divergent-priors.sh"
+# End:
diff --git a/qa/standalone/osd/ec-error-rollforward.sh b/qa/standalone/osd/ec-error-rollforward.sh
new file mode 100755
index 00000000..621e6b13
--- /dev/null
+++ b/qa/standalone/osd/ec-error-rollforward.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7132" # git grep '\<7132\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ export margin=10
+ export objects=200
+ export poolname=test
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_ec_error_rollforward() {
+ local dir=$1
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+
+ ceph osd erasure-code-profile set ec-profile m=2 k=2 crush-failure-domain=osd
+ ceph osd pool create ec 1 1 erasure ec-profile
+
+ rados -p ec put foo /etc/passwd
+
+ kill -STOP $(cat $dir/osd.2.pid)
+
+ rados -p ec rm foo &
+ pids="$!"
+ sleep 1
+ rados -p ec rm a &
+ pids+=" $!"
+ rados -p ec rm b &
+ pids+=" $!"
+ rados -p ec rm c &
+ pids+=" $!"
+ sleep 1
+ # Use SIGKILL so stopped osd.2 will terminate
+ # and kill_daemons waits for daemons to die
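+ # (a SIGSTOPped process cannot run a TERM handler, but SIGKILL terminates it regardless)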
+ kill_daemons $dir KILL osd
+ kill $pids
+ wait
+
+ activate_osd $dir 0 || return 1
+ activate_osd $dir 1 || return 1
+ activate_osd $dir 2 || return 1
+ activate_osd $dir 3 || return 1
+
+ wait_for_clean || return 1
+}
+
+main ec-error-rollforward "$@"
diff --git a/qa/standalone/osd/osd-backfill-prio.sh b/qa/standalone/osd/osd-backfill-prio.sh
new file mode 100755
index 00000000..a089696b
--- /dev/null
+++ b/qa/standalone/osd/osd-backfill-prio.sh
@@ -0,0 +1,519 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2019 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
+ CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 "
+ export objects=50
+ export poolprefix=test
+ export FORCE_PRIO="254" # See OSD_BACKFILL_PRIORITY_FORCED
+ export DEGRADED_PRIO="150" # See OSD_BACKFILL_DEGRADED_PRIORITY_BASE + 10
+ export NORMAL_PRIO="110" # See OSD_BACKFILL_PRIORITY_BASE + 10
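+ # note: the expected priorities computed below add the degradation count
+ # (e.g. +1 for size 2 -> 1) and any pool recovery_priority to these bases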
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
+function TEST_backfill_priority() {
+ local dir=$1
+ local pools=10
+ local OSDS=5
+ # size 2 -> 1 means degraded by 1, so add 1 to base prio
+ local degraded_prio=$(expr $DEGRADED_PRIO + 1)
+ local max_tries=10
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 5
+
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ # Find 3 pools whose pgs have the same primary but each with
+ # its second replica on a different osd.
+ local PG1
+ local POOLNUM1
+ local pool1
+ local chk_osd1_1
+ local chk_osd1_2
+
+ local PG2
+ local POOLNUM2
+ local pool2
+ local chk_osd2
+
+ local PG3
+ local POOLNUM3
+ local pool3
+
+ for p in $(seq 1 $pools)
+ do
+ ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
+ local test_osd1=$(head -1 $dir/acting)
+ local test_osd2=$(tail -1 $dir/acting)
+ if [ -z "$PG1" ];
+ then
+ PG1="${p}.0"
+ POOLNUM1=$p
+ pool1="${poolprefix}$p"
+ chk_osd1_1=$test_osd1
+ chk_osd1_2=$test_osd2
+ elif [ -z "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 ];
+ then
+ PG2="${p}.0"
+ POOLNUM2=$p
+ pool2="${poolprefix}$p"
+ chk_osd2=$test_osd2
+ elif [ -n "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 -a "$chk_osd2" != $test_osd2 ];
+ then
+ PG3="${p}.0"
+ POOLNUM3=$p
+ pool3="${poolprefix}$p"
+ break
+ fi
+ done
+ rm -f $dir/acting
+
+ if [ "$pool2" = "" -o "pool3" = "" ];
+ then
+ echo "Failure to find appropirate PGs"
+ return 1
+ fi
+
+ for p in $(seq 1 $pools)
+ do
+ if [ $p != $POOLNUM1 -a $p != $POOLNUM2 -a $p != $POOLNUM3 ];
+ then
+ delete_pool ${poolprefix}$p
+ fi
+ done
+
+ ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool3 size 1
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/data bs=1M count=10
+ p=1
+ for pname in $pool1 $pool2 $pool3
+ do
+ for i in $(seq 1 $objects)
+ do
+ rados -p ${pname} put obj${i}-p${p} $dir/data
+ done
+ p=$(expr $p + 1)
+ done
+
+ local otherosd=$(get_not_primary $pool1 obj1-p1)
+
+ ceph pg dump pgs
+ ERRORS=0
+
+ ceph osd set nobackfill
+ ceph osd set noout
+
+ # Get a pg to want to backfill and quickly force it
+ # to be preempted.
+ ceph osd pool set $pool3 size 2
+ sleep 2
+
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+
+ # 3. Item is in progress, adjust priority with no higher priority waiting
+ for i in $(seq 1 $max_tries)
+ do
+ if ! ceph pg force-backfill $PG3 2>&1 | grep -q "doesn't require backfilling"; then
+ break
+ fi
+ if [ "$i" = "$max_tries" ]; then
+ echo "ERROR: Didn't appear to be able to force-backfill"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ sleep 2
+ done
+ flush_pg_stats || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+
+ ceph osd out osd.$chk_osd1_2
+ sleep 2
+ flush_pg_stats || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+ ceph pg dump pgs
+
+ ceph osd pool set $pool2 size 2
+ sleep 2
+ flush_pg_stats || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ ceph pg dump pgs
+
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG1}\")).prio")
+ if [ "$PRIO" != "$NORMAL_PRIO" ];
+ then
+ echo "The normal PG ${PG1} doesn't have prio $NORMAL_PRIO queued waiting"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+
+ # Using eval will strip double-quotes from item
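+ # (jq -r would also print the raw string without quotes)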
+ eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG3} ];
+ then
+ echo "The force-backfill PG $PG3 didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $FORCE_PRIO ];
+ then
+ echo "The force-backfill PG ${PG3} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # 1. Item is queued, re-queue with new priority
+ for i in $(seq 1 $max_tries)
+ do
+ if ! ceph pg force-backfill $PG2 2>&1 | grep -q "doesn't require backfilling"; then
+ break
+ fi
+ if [ "$i" = "$max_tries" ]; then
+ echo "ERROR: Didn't appear to be able to force-backfill"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ sleep 2
+ done
+ sleep 2
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
+ if [ "$PRIO" != "$FORCE_PRIO" ];
+ then
+ echo "The second force-backfill PG ${PG2} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ flush_pg_stats || return 1
+
+ # 4. Item is in progress; if higher priority items are waiting, preempt the item
+ ceph pg cancel-force-backfill $PG3 || return 1
+ sleep 2
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG3}\")).prio")
+ if [ "$PRIO" != "$degraded_prio" ];
+ then
+ echo "After cancel-force-backfill PG ${PG3} doesn't have prio $degraded_prio"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+
+ eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG2} ];
+ then
+ echo "The force-recovery PG $PG2 didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $FORCE_PRIO ];
+ then
+ echo "The first force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ ceph pg cancel-force-backfill $PG2 || return 1
+ sleep 5
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+
+ # 2. Item is queued, re-queue and preempt because new priority higher than an in progress item
+ flush_pg_stats || return 1
+ ceph pg force-backfill $PG3 || return 1
+ sleep 2
+
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
+ if [ "$PRIO" != "$degraded_prio" ];
+ then
+ echo "After cancel-force-backfill PG ${PG2} doesn't have prio $degraded_prio"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+
+ eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG3} ];
+ then
+ echo "The force-backfill PG $PG3 didn't get promoted to an in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $FORCE_PRIO ];
+ then
+ echo "The force-backfill PG ${PG2} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ ceph osd unset noout
+ ceph osd unset nobackfill
+
+ wait_for_clean "CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations" || return 1
+
+ ceph pg dump pgs
+
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_pgstate_history
+
+ if [ $ERRORS != "0" ];
+ then
+ echo "$ERRORS error(s) found"
+ else
+ echo TEST PASSED
+ fi
+
+ delete_pool $pool1
+ delete_pool $pool2
+ delete_pool $pool3
+ kill_daemons $dir || return 1
+ return $ERRORS
+}
+
+#
+# Show that pool recovery_priority is added to the backfill priority
+#
+# Create 2 pools with 2 OSDs with different primaries
+# pool 1 with recovery_priority 1
+# pool 2 with recovery_priority 2
+#
+# Start backfill by changing the pool sizes from 1 to 2
+# Use dump_recovery_reservations to verify priorities
+function TEST_backfill_pool_priority() {
+ local dir=$1
+ local pools=3 # Don't assume the first 2 pools are exactly what we want
+ local OSDS=2
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 5
+
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ # Find 2 pools with different primaries which
+ # means the replica must be on another osd.
+ local PG1
+ local POOLNUM1
+ local pool1
+ local chk_osd1_1
+ local chk_osd1_2
+
+ local PG2
+ local POOLNUM2
+ local pool2
+ local chk_osd2_1
+ local chk_osd2_2
+
+ for p in $(seq 1 $pools)
+ do
+ ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
+ local test_osd1=$(head -1 $dir/acting)
+ local test_osd2=$(tail -1 $dir/acting)
+ if [ -z "$PG1" ];
+ then
+ PG1="${p}.0"
+ POOLNUM1=$p
+ pool1="${poolprefix}$p"
+ chk_osd1_1=$test_osd1
+ chk_osd1_2=$test_osd2
+ elif [ $chk_osd1_1 != $test_osd1 ];
+ then
+ PG2="${p}.0"
+ POOLNUM2=$p
+ pool2="${poolprefix}$p"
+ chk_osd2_1=$test_osd1
+ chk_osd2_2=$test_osd2
+ break
+ fi
+ done
+ rm -f $dir/acting
+
+ if [ "$pool2" = "" ];
+ then
+ echo "Failure to find appropirate PGs"
+ return 1
+ fi
+
+ for p in $(seq 1 $pools)
+ do
+ if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ];
+ then
+ delete_pool ${poolprefix}$p
+ fi
+ done
+
+ pool1_extra_prio=1
+ pool2_extra_prio=2
+ # size 2 -> 1 means degraded by 1, so add 1 to base prio
+ pool1_prio=$(expr $DEGRADED_PRIO + 1 + $pool1_extra_prio)
+ pool2_prio=$(expr $DEGRADED_PRIO + 1 + $pool2_extra_prio)
+
+ ceph osd pool set $pool1 size 1
+ ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
+ ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/data bs=1M count=10
+ p=1
+ for pname in $pool1 $pool2
+ do
+ for i in $(seq 1 $objects)
+ do
+ rados -p ${pname} put obj${i}-p${p} $dir/data
+ done
+ p=$(expr $p + 1)
+ done
+
+ local otherosd=$(get_not_primary $pool1 obj1-p1)
+
+ ceph pg dump pgs
+ ERRORS=0
+
+ ceph osd pool set $pool1 size 2
+ ceph osd pool set $pool2 size 2
+ sleep 5
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out
+ echo osd.${chk_osd1_1}
+ cat $dir/dump.${chk_osd1_1}.out
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_2}) dump_recovery_reservations > $dir/dump.${chk_osd1_2}.out
+ echo osd.${chk_osd1_2}
+ cat $dir/dump.${chk_osd1_2}.out
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG1} ];
+ then
+ echo "The primary PG ${PG1} didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool1_prio ];
+ then
+ echo "The primary PG ${PG1} doesn't have prio $pool1_prio"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG1} ];
+ then
+ echo "The primary PG ${PG1} didn't become the in progress item on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool1_prio ];
+ then
+ echo "The primary PG ${PG1} doesn't have prio $pool1_prio on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG2} ];
+ then
+ echo "The primary PG ${PG2} didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool2_prio ];
+ then
+ echo "The primary PG ${PG2} doesn't have prio $pool2_prio"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG2} ];
+ then
+ echo "The primary PG $PG2 didn't become the in progress item on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool2_prio ];
+ then
+ echo "The primary PG ${PG2} doesn't have prio $pool2_prio on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ wait_for_clean || return 1
+
+ if [ $ERRORS != "0" ];
+ then
+ echo "$ERRORS error(s) found"
+ else
+ echo TEST PASSED
+ fi
+
+ delete_pool $pool1
+ delete_pool $pool2
+ kill_daemons $dir || return 1
+ return $ERRORS
+}
+
+main osd-backfill-prio "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-prio.sh"
+# End:
diff --git a/qa/standalone/osd/osd-backfill-recovery-log.sh b/qa/standalone/osd/osd-backfill-recovery-log.sh
new file mode 100755
index 00000000..e55250e8
--- /dev/null
+++ b/qa/standalone/osd/osd-backfill-recovery-log.sh
@@ -0,0 +1,136 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2019 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7129" # git grep '\<7129\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
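+# _common_test args (for reference): dir, extra osd options, expected pg log
+# length, expected dups length, objects written before the up set is marked
+# out, [objects written after]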
+function _common_test() {
+ local dir=$1
+ local extra_opts="$2"
+ local loglen="$3"
+ local dupslen="$4"
+ local objects="$5"
+ local moreobjects=${6:-0}
+
+ local OSDS=6
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+ export EXTRA_OPTS=" $extra_opts"
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ create_pool test 1 1
+
+ for j in $(seq 1 $objects)
+ do
+ rados -p test put obj-${j} /etc/passwd
+ done
+
+ # Mark out all OSDs for this pool
+ ceph osd out $(ceph pg dump pgs --format=json | jq '.pg_stats[0].up[]')
+ if [ "$moreobjects" != "0" ]; then
+ for j in $(seq 1 $moreobjects)
+ do
+ rados -p test put obj-more-${j} /etc/passwd
+ done
+ fi
+ sleep 1
+ wait_for_clean
+
+ newprimary=$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')
+ kill_daemons
+
+ ERRORS=0
+ _objectstore_tool_nodown $dir $newprimary --no-mon-config --pgid 1.0 --op log | tee $dir/result.log
+ LOGLEN=$(jq '.pg_log_t.log | length' $dir/result.log)
+ if [ $LOGLEN != "$loglen" ]; then
+ echo "FAILED: Wrong log length got $LOGLEN (expected $loglen)"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ DUPSLEN=$(jq '.pg_log_t.dups | length' $dir/result.log)
+ if [ $DUPSLEN != "$dupslen" ]; then
+ echo "FAILED: Wrong dups length got $DUPSLEN (expected $dupslen)"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ grep "copy_up_to\|copy_after" $dir/osd.*.log
+ rm -f $dir/result.log
+ if [ $ERRORS != "0" ]; then
+ echo TEST FAILED
+ return 1
+ fi
+}
+
+
+# Cause copy_up_to() to only partially copy logs, copy additional dups, and trim dups
+function TEST_backfill_log_1() {
+ local dir=$1
+
+ _common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2 --osd_pg_log_dups_tracked=10" 1 9 150
+}
+
+
+# Cause copy_up_to() to only partially copy logs, copy additional dups
+function TEST_backfill_log_2() {
+ local dir=$1
+
+ _common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2" 1 149 150
+}
+
+
+# Cause copy_after() to only copy logs, no dups
+function TEST_recovery_1() {
+ local dir=$1
+
+ _common_test $dir "--osd_min_pg_log_entries=50 --osd_max_pg_log_entries=50 --osd_pg_log_dups_tracked=60 --osd_pg_log_trim_min=10" 40 0 40
+}
+
+
+# Cause copy_after() to copy logs with dups
+function TEST_recovery_2() {
+ local dir=$1
+
+ _common_test $dir "--osd_min_pg_log_entries=150 --osd_max_pg_log_entries=150 --osd_pg_log_dups_tracked=3000 --osd_pg_log_trim_min=10" 151 10 141 20
+}
+
+main osd-backfill-recovery-log "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-recovery-log.sh"
+# End:
diff --git a/qa/standalone/osd/osd-backfill-space.sh b/qa/standalone/osd/osd-backfill-space.sh
new file mode 100755
index 00000000..3978668e
--- /dev/null
+++ b/qa/standalone/osd/osd-backfill-space.sh
@@ -0,0 +1,1175 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2018 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7180" # git grep '\<7180\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 "
+ CEPH_ARGS+="--fake_statfs_for_testing=3686400 "
+ CEPH_ARGS+="--osd_max_backfills=10 "
+ export objects=600
+ export poolprefix=test
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
+function get_num_in_state() {
+ local state=$1
+ local expression
+ expression+="select(contains(\"${state}\"))"
+ ceph --format json pg dump pgs 2>/dev/null | \
+ jq ".pg_stats | [.[] | .state | $expression] | length"
+}
+
+
+function wait_for_not_state() {
+ local state=$1
+ local num_in_state=-1
+ local cur_in_state
+ local -a delays=($(get_timeout_delays $2 5))
+ local -i loop=0
+
+ flush_pg_stats || return 1
+ while test $(get_num_pgs) == 0 ; do
+ sleep 1
+ done
+
+ while true ; do
+ cur_in_state=$(get_num_in_state ${state})
+ test $cur_in_state = "0" && break
+ if test $cur_in_state != $num_in_state ; then
+ loop=0
+ num_in_state=$cur_in_state
+ elif (( $loop >= ${#delays[*]} )) ; then
+ ceph pg dump pgs
+ return 1
+ fi
+ sleep ${delays[$loop]}
+ loop+=1
+ done
+ return 0
+}
+
+
+function wait_for_not_backfilling() {
+ local timeout=$1
+ wait_for_not_state backfilling $timeout
+}
+
+
+function wait_for_not_activating() {
+ local timeout=$1
+ wait_for_not_state activating $timeout
+}
+
+# All tests run in an environment with a fake total space of
+# 3600K (3686400 bytes), which can hold 600 6K replicated objects or
+# 200 18K shards of erasure coded objects. For a k=3, m=2 EC pool
+# that would theoretically allow a 54K object, but with a 4K chunk
+# size plus 4K of rounding to account for the chunks the practical
+# maximum is a 36K object: ((36K / 3) + 4K) * 200 = 3200K, which is
+# 88% of the 3600K available to a shard.
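+# Worked check of the numbers above (not used by the tests):
+#   3686400 / 1024 = 3600K; 600 * 6K = 3600K; ((36K / 3) + 4K) * 200 = 3200K,
+#   and 3200K / 3600K is roughly 88%.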
+
+# Create 2 pools with size 1
+# Write enough data that only 1 pool pg can fit per osd
+# Increase the pool size to 2
+# On 3 OSDs this should result in 1 OSD with overlapping replicas,
+# so both pools can't fit. We assume pgid 1.0 and 2.0 won't
+# map to the same 2 OSDs.
+# At least 1 pool shouldn't have room to backfill
+# All other pools should go active+clean
+function TEST_backfill_test_simple() {
+ local dir=$1
+ local pools=2
+ local OSDS=3
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 1
+ done
+
+ wait_for_clean || return 1
+
+ # This won't work if the 2 pools' primary and only osds
+ # are the same.
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
+ for o in $(seq 1 $objects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj$o $dir/datafile
+ done
+ done
+
+ ceph pg dump pgs
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 30
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" != "1" ];
+ then
+ echo "One pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ expected="$(expr $pools - 1)"
+ if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "$expected" ];
+ then
+ echo "$expected didn't finish backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ ceph pg dump pgs
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+ ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
+}
+
+
+# Create 8 pools of size 1 on 20 OSDs
+# Write 4K * 600 objects (only 1 pool pg can fit on any given osd)
+# Increase pool size to 2
+# At least 1 pool shouldn't have room to backfill
+# All other pools should go active+clean
+function TEST_backfill_test_multi() {
+ local dir=$1
+ local pools=8
+ local OSDS=20
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 1
+ done
+
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
+ for o in $(seq 1 $objects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj$o $dir/datafile
+ done
+ done
+
+ ceph pg dump pgs
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 30
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ERRORS=0
+ full="$(ceph pg dump pgs | grep +backfill_toofull | wc -l)"
+ if [ "$full" -lt "1" ];
+ then
+ echo "At least one pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ expected="$(expr $pools - $full)"
+ if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "$expected" ];
+ then
+ echo "$expected didn't finish backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ ceph pg dump pgs
+ ceph status
+
+ ceph status --format=json-pretty > $dir/stat.json
+
+ eval SEV=$(jq '.health.checks.PG_BACKFILL_FULL.severity' $dir/stat.json)
+ if [ "$SEV" != "HEALTH_WARN" ]; then
+ echo "PG_BACKFILL_FULL severity $SEV not HEALTH_WARN"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+ eval MSG=$(jq '.health.checks.PG_BACKFILL_FULL.summary.message' $dir/stat.json)
+ if [ "$MSG" != "Low space hindering backfill (add storage if this doesn't resolve itself): 4 pgs backfill_toofull" ]; then
+ echo "PG_BACKFILL_FULL message '$MSG' mismatched"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+ rm -f $dir/stat.json
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ # Work around for http://tracker.ceph.com/issues/38195
+ kill_daemons $dir #|| return 1
+ ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
+}
+
+
+# Make sure that 2 pgs trying to backfill at the same time to the
+# same target are handled. This might be covered by the simple test
+# above, but this makes sure we get it.
+#
+# Create 10 pools of size 2 and identify 2 that have the same
+# non-primary osd.
+# Delete all other pools
+# Set size to 1 and write 4K * 600 to each pool
+# Set size back to 2
+# The 2 pools should race to backfill.
+# One pool goes active+clean
+# The other goes active+...+backfill_toofull
+function TEST_backfill_test_sametarget() {
+ local dir=$1
+ local pools=10
+ local OSDS=5
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 5
+
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ # Find 2 pools whose pgs have distinct primaries but their
+ # second replica on the same osd.
+ local PG1
+ local POOLNUM1
+ local pool1
+ local chk_osd1
+ local chk_osd2
+
+ local PG2
+ local POOLNUM2
+ local pool2
+ for p in $(seq 1 $pools)
+ do
+ ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
+ local test_osd1=$(head -1 $dir/acting)
+ local test_osd2=$(tail -1 $dir/acting)
+ if [ $p = "1" ];
+ then
+ PG1="${p}.0"
+ POOLNUM1=$p
+ pool1="${poolprefix}$p"
+ chk_osd1=$test_osd1
+ chk_osd2=$test_osd2
+ elif [ $chk_osd1 != $test_osd1 -a $chk_osd2 = $test_osd2 ];
+ then
+ PG2="${p}.0"
+ POOLNUM2=$p
+ pool2="${poolprefix}$p"
+ break
+ fi
+ done
+ rm -f $dir/acting
+
+ if [ "$pool2" = "" ];
+ then
+ echo "Failure to find appropirate PGs"
+ return 1
+ fi
+
+ for p in $(seq 1 $pools)
+ do
+ if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ];
+ then
+ delete_pool ${poolprefix}$p
+ fi
+ done
+
+ ceph osd pool set $pool1 size 1
+ ceph osd pool set $pool2 size 1
+
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
+ for i in $(seq 1 $objects)
+ do
+ rados -p $pool1 put obj$i $dir/datafile
+ rados -p $pool2 put obj$i $dir/datafile
+ done
+
+ ceph osd pool set $pool1 size 2
+ ceph osd pool set $pool2 size 2
+ sleep 30
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" != "1" ];
+ then
+ echo "One pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "1" ];
+ then
+ echo "One didn't finish backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ ceph pg dump pgs
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ delete_pool $pool1
+ delete_pool $pool2
+ kill_daemons $dir || return 1
+ ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
+}
+
+# 2 pools can't both backfill to a target which has other data
+# 1 of the pools has objects that increase from 1024 to 2611 bytes
+#
+# Write to fill pool which is size 1
+# Take fill pool osd down (the other 2 pools must go to the remaining OSDs)
+# Save an export of data on fill OSD and restart it
+# Write an initial 1K to pool1 which has pg 2.0
+# Export 2.0 from a non-fillpool OSD; don't wait for it to start up
+# Take down fillpool OSD
+# Put 1K object version of 2.0 on fillpool OSD
+# Put back fillpool data on fillpool OSD
+# With fillpool down write 2611 byte objects
+# Take down $osd and bring back $fillosd simultaneously
+# Wait for backfilling
+# One PG will be able to backfill its remaining data
+# One PG must get backfill_toofull
+function TEST_backfill_multi_partial() {
+ local dir=$1
+ local EC=$2
+ local pools=2
+ local OSDS=3
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+
+ ceph osd set-require-min-compat-client luminous
+ create_pool fillpool 1 1
+ ceph osd pool set fillpool size 1
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+
+ wait_for_clean || return 1
+
+ # Partially fill an osd
+    # We have room for 600 6K replicated objects. If we create 2611 byte objects
+    # there is 3600K - (2611 * 600) = 2070K left, so the fill pool plus one
+    # replica from the other 2 pools comes to 85% of 3600K
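+    # (2611 * 600 is roughly 1530K, so the fill data plus one such replica
+    # is 3060K = 0.85 * 3600K, the backfillfull ratio set above)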
+
+ dd if=/dev/urandom of=$dir/datafile bs=2611 count=1
+ for o in $(seq 1 $objects)
+ do
+ rados -p fillpool put obj-fill-${o} $dir/datafile
+ done
+
+ local fillosd=$(get_primary fillpool obj-fill-1)
+ osd=$(expr $fillosd + 1)
+ if [ "$osd" = "$OSDS" ]; then
+ osd="0"
+ fi
+
+ kill_daemon $dir/osd.$fillosd.pid TERM
+ ceph osd out osd.$fillosd
+
+ _objectstore_tool_nodown $dir $fillosd --op export-remove --pgid 1.0 --file $dir/fillexport.out || return 1
+ activate_osd $dir $fillosd || return 1
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=1
+ for o in $(seq 1 $objects)
+ do
+ rados -p "${poolprefix}1" put obj-1-${o} $dir/datafile
+ done
+
+ ceph pg dump pgs
+ # The $osd OSD is started, but we don't wait so we can kill $fillosd at the same time
+ _objectstore_tool_nowait $dir $osd --op export --pgid 2.0 --file $dir/export.out
+ kill_daemon $dir/osd.$fillosd.pid TERM
+ _objectstore_tool_nodown $dir $fillosd --force --op remove --pgid 2.0
+ _objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out || return 1
+ _objectstore_tool_nodown $dir $fillosd --op import --pgid 1.0 --file $dir/fillexport.out || return 1
+ ceph pg dump pgs
+ sleep 20
+ ceph pg dump pgs
+
+ # re-write everything
+ dd if=/dev/urandom of=$dir/datafile bs=2611 count=1
+ for o in $(seq 1 $objects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj-${p}-${o} $dir/datafile
+ done
+ done
+
+ kill_daemon $dir/osd.$osd.pid TERM
+ ceph osd out osd.$osd
+
+ activate_osd $dir $fillosd || return 1
+ ceph osd in osd.$fillosd
+ sleep 30
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ flush_pg_stats || return 1
+ ceph pg dump pgs
+
+ ERRORS=0
+ if [ "$(get_num_in_state backfill_toofull)" != "1" ];
+ then
+ echo "One PG should be in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ "$(get_num_in_state active+clean)" != "2" ];
+ then
+ echo "Two PGs should be active+clean after one PG completed backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ delete_pool fillpool
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+ ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
+}
+
+# Make sure that the amount of bytes already on the replica doesn't
+# cause an out of space condition
+#
+# Create 1 pool and write 4K * 600 objects
+# Remove 25% (150) of the objects with one OSD down (noout set)
+# Increase the size of the remaining 75% (450) of the objects to 6K
+# Bring back down OSD
+# The pool should go active+clean
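+# (85% of 3600K is 3060K: the grown 450 * 6K = 2700K fits, but only if the
+# 600 * 4K = 2400K still on the returning replica is counted rather than
+# simply added on top of the incoming bytes)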
+function TEST_backfill_grow() {
+ local dir=$1
+ local poolname="test"
+ local OSDS=3
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 3
+ sleep 5
+
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=${dir}/4kdata bs=1k count=4
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i $dir/4kdata
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set noout
+ kill_daemons $dir TERM $otherosd || return 1
+
+ rmobjects=$(expr $objects / 4)
+ for i in $(seq 1 $rmobjects)
+ do
+ rados -p $poolname rm obj$i
+ done
+
+ dd if=/dev/urandom of=${dir}/6kdata bs=6k count=1
+ for i in $(seq $(expr $rmobjects + 1) $objects)
+ do
+ rados -p $poolname put obj$i $dir/6kdata
+ done
+
+ activate_osd $dir $otherosd || return 1
+
+ ceph tell osd.$primary debug kick_recovery_wq 0
+
+ sleep 2
+
+ wait_for_clean || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+ ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1
+}
+
+# Create a 5 shard EC pool on a 6 OSD cluster
+# Fill 1 OSD with 2600K of data and take that osd down.
+# Write the EC pool on 5 OSDs
+# Take down 1 of them (it must contain an EC shard)
+# Bring up the OSD with the fill data
+# There is not enough room to backfill to the partially full OSD
+function TEST_ec_backfill_simple() {
+ local dir=$1
+ local EC=$2
+ local pools=1
+ local OSDS=6
+ local k=3
+ local m=2
+ local ecobjects=$(expr $objects / $k)
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+ create_pool fillpool 1 1
+ ceph osd pool set fillpool size 1
+
+ # Partially fill an osd
+    # We have room for 200 18K replicated objects. If we create 13K objects
+    # there is only 3600K - (13K * 200) = 1000K left, which won't hold
+    # a k=3 shard of the 18K objects written below: ((18K / 3) + 4K) * 200 = 2000K.
+    # Actual usage per shard is 8K * 200 = 1600K because 18K/3 is 6K, which
+    # rounds up to 8K. The 2000K is the ceiling on the 18K * 200 = 3600K logical
+    # bytes in the pool.
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=13
+ for o in $(seq 1 $ecobjects)
+ do
+ rados -p fillpool put obj$o $dir/datafile
+ done
+
+ local fillosd=$(get_primary fillpool obj1)
+ osd=$(expr $fillosd + 1)
+ if [ "$osd" = "$OSDS" ]; then
+ osd="0"
+ fi
+
+ sleep 5
+ kill_daemon $dir/osd.$fillosd.pid TERM
+ ceph osd out osd.$fillosd
+ sleep 2
+ ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
+ done
+
+ # Can't wait for clean here because we created a stale pg
+ #wait_for_clean || return 1
+ sleep 5
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=18
+ for o in $(seq 1 $ecobjects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj$o $dir/datafile
+ done
+ done
+
+ kill_daemon $dir/osd.$osd.pid TERM
+ ceph osd out osd.$osd
+
+ activate_osd $dir $fillosd || return 1
+ ceph osd in osd.$fillosd
+ sleep 30
+
+ ceph pg dump pgs
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ceph pg dump pgs
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ]; then
+ echo "One pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ delete_pool fillpool
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+}
+
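+# osdlist OSDS EXCLUDEOSD: print a space-separated list of osd ids 0..OSDS-1,
+# omitting EXCLUDEOSD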
+function osdlist() {
+ local OSDS=$1
+ local excludeosd=$2
+
+ osds=""
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ if [ $osd = $excludeosd ];
+ then
+ continue
+ fi
+ if [ -n "$osds" ]; then
+ osds="${osds} "
+ fi
+ osds="${osds}${osd}"
+ done
+ echo $osds
+}
+
+# Create a pool with size 1 and fill with data so that only 1 EC shard can fit.
+# Write data to 2 EC pools mapped to the same OSDs (excluding filled one)
+# Remap the last OSD to partially full OSD on both pools
+# The 2 pools should race to backfill.
+# One pool goes active+clean
+# The other goes active+...+backfill_toofull
+function TEST_ec_backfill_multi() {
+ local dir=$1
+ local EC=$2
+ local pools=2
+ local OSDS=6
+ local k=3
+ local m=2
+ local ecobjects=$(expr $objects / $k)
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+    # This test requires that a shard from each of the 2 pools
+    # fits on a given OSD, but both together will not fit. I'm making
+    # the fillosd data plus 1 shard use 75% of the space,
+    # leaving not enough for a second shard to stay under the 85% set here.
+ ceph osd set-backfillfull-ratio .85
+
+ ceph osd set-require-min-compat-client luminous
+ create_pool fillpool 1 1
+ ceph osd pool set fillpool size 1
+
+ # Partially fill an osd
+    # We have room for 200 18K replicated objects. If we create 9K objects
+    # there is only 3600K - (9K * 200) = 1800K left, which will only hold
+    # one k=3 shard of the 12K objects written below: ((12K / 3) + 4K) * 200 = 1600K.
+    # The actual data will be (12K / 3) * 200 = 800K because the extra
+    # is the reservation padding for chunking.
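+    # (1800K fits one 1600K shard but not two: 2 * 1600K = 3200K > 1800K)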
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=9
+ for o in $(seq 1 $ecobjects)
+ do
+ rados -p fillpool put obj$o $dir/datafile
+ done
+
+ local fillosd=$(get_primary fillpool obj1)
+ ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
+
+ nonfillosds="$(osdlist $OSDS $fillosd)"
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
+ ceph osd pg-upmap "$(expr $p + 1).0" $nonfillosds
+ done
+
+ # Can't wait for clean here because we created a stale pg
+ #wait_for_clean || return 1
+ sleep 15
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=12
+ for o in $(seq 1 $ecobjects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj$o-$p $dir/datafile
+ done
+ done
+
+ ceph pg dump pgs
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pg-upmap $(expr $p + 1).0 ${nonfillosds% *} $fillosd
+ done
+
+ sleep 30
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ceph pg dump pgs
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ];
+ then
+ echo "One pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ];
+ then
+ echo "One didn't finish backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ delete_pool fillpool
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+}
+
+# Similar to TEST_ec_backfill_multi but one of the ec pools
+# already had some data on the target OSD
+
+# Create a pool with size 1 and fill with data so that only 1 EC shard can fit.
+# Write a small amount of data to 1 EC pool that still includes the filled one
+# Take down fillosd with noout set
+# Write data to 2 EC pools mapped to the same OSDs (excluding filled one)
+# Remap the last OSD to partially full OSD on both pools
+# The 2 pools should race to backfill.
+# One pool goes active+clean
+# The other goes active+...+backfill_toofull
+function SKIP_TEST_ec_backfill_multi_partial() {
+ local dir=$1
+ local EC=$2
+ local pools=2
+ local OSDS=5
+ local k=3
+ local m=2
+ local ecobjects=$(expr $objects / $k)
+ local lastosd=$(expr $OSDS - 1)
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+    # This test requires that a shard from each of the 2 pools
+    # fits on a given OSD, but both together will not fit. I'm making
+    # the fillosd data plus 1 shard use 75% of the space,
+    # leaving not enough for a second shard to stay under the 85% set here.
+ ceph osd set-backfillfull-ratio .85
+
+ ceph osd set-require-min-compat-client luminous
+ create_pool fillpool 1 1
+ ceph osd pool set fillpool size 1
+ # last osd
+ ceph osd pg-upmap 1.0 $lastosd
+
+ # Partially fill an osd
+    # We have room for 200 18K replicated objects. If we create 9K objects
+    # there is only 3600K - (9K * 200) = 1800K left, which will only hold
+    # one k=3 shard of the 12K objects written below: ((12K / 3) + 4K) * 200 = 1600K.
+    # The actual data will be (12K / 3) * 200 = 800K because the extra
+    # is the reservation padding for chunking.
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=9
+ for o in $(seq 1 $ecobjects)
+ do
+ rados -p fillpool put obj$o $dir/datafile
+ done
+
+ local fillosd=$(get_primary fillpool obj1)
+ ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
+
+ nonfillosds="$(osdlist $OSDS $fillosd)"
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
+ ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $lastosd)
+ done
+
+ # Can't wait for clean here because we created a stale pg
+ #wait_for_clean || return 1
+ sleep 15
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=1
+ for o in $(seq 1 $ecobjects)
+ do
+ rados -p "${poolprefix}1" put obj$o-1 $dir/datafile
+ done
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $(expr $lastosd - 1))
+ done
+ ceph pg dump pgs
+
+ #ceph osd set noout
+ #kill_daemons $dir TERM osd.$lastosd || return 1
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=12
+ for o in $(seq 1 $ecobjects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj$o-$p $dir/datafile
+ done
+ done
+
+ ceph pg dump pgs
+
+    # Now backfill lastosd by adding it back into the upmap
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $lastosd)
+ done
+ #activate_osd $dir $lastosd || return 1
+ #ceph tell osd.0 debug kick_recovery_wq 0
+
+ sleep 30
+ ceph pg dump pgs
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ceph pg dump pgs
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ];
+ then
+ echo "One pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ];
+ then
+ echo "One didn't finish backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ delete_pool fillpool
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+}
+
+function SKIP_TEST_ec_backfill_multi_partial() {
+ local dir=$1
+ local EC=$2
+ local pools=2
+ local OSDS=6
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+    # Below we need to fit 3200K in 3600K, which is 88%,
+    # so set the backfillfull ratio to 90%
+ ceph osd set-backfillfull-ratio .90
+
+ ceph osd set-require-min-compat-client luminous
+ create_pool fillpool 1 1
+ ceph osd pool set fillpool size 1
+
+ # Partially fill an osd
+    # We have room for 200 48K ec objects. If we create 4K replicated objects
+    # there is 3600K - (4K * 200) = 2800K left, which won't hold 2 k=3 shards
+    # of 200 12K objects, which take ((12K / 3) + 4K) * 200 = 1600K each.
+    # On the other OSDs 2 * 1600K = 3200K, which is 88% of 3600K.
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=4
+ for o in $(seq 1 $objects)
+ do
+ rados -p fillpool put obj$o $dir/datafile
+ done
+
+ local fillosd=$(get_primary fillpool obj1)
+ osd=$(expr $fillosd + 1)
+ if [ "$osd" = "$OSDS" ]; then
+ osd="0"
+ fi
+
+ sleep 5
+ kill_daemon $dir/osd.$fillosd.pid TERM
+ ceph osd out osd.$fillosd
+ sleep 2
+ ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile
+ done
+
+ # Can't wait for clean here because we created a stale pg
+ #wait_for_clean || return 1
+ sleep 5
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=12
+ for o in $(seq 1 $objects)
+ do
+ for p in $(seq 1 $pools)
+ do
+ rados -p "${poolprefix}$p" put obj$o $dir/datafile
+ done
+ done
+
+ #ceph pg map 2.0 --format=json | jq '.'
+ kill_daemon $dir/osd.$osd.pid TERM
+ ceph osd out osd.$osd
+
+ _objectstore_tool_nodown $dir $osd --op export --pgid 2.0 --file $dir/export.out
+ _objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out
+
+ activate_osd $dir $fillosd || return 1
+ ceph osd in osd.$fillosd
+ sleep 30
+
+ wait_for_not_backfilling 240 || return 1
+ wait_for_not_activating 60 || return 1
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ];
+ then
+ echo "One pool should have been in backfill_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ];
+ then
+ echo "One didn't finish backfill"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ ceph pg dump pgs
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ delete_pool fillpool
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+}
+
+# Create 1 EC pool
+# Write 200 12K objects: ((12K / 3) + 4K) * 200 = 1600K
+# Take 1 shard's OSD down (with noout set)
+# Remove 50 objects: ((12K / 3) + 4K) * 50 = 400K
+# Write 150 36K objects (grow 150 objects): 2400K
+# But there is already 1600K of usage, so backfill
+# would be too full if it didn't account for existing data
+# Bring the down OSD back so it must backfill
+# It should go active+clean taking into account data already there
+function TEST_ec_backfill_grow() {
+ local dir=$1
+ local poolname="test"
+ local OSDS=6
+ local k=3
+ local m=2
+ local ecobjects=$(expr $objects / $k)
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-backfillfull-ratio .85
+
+ ceph osd set-require-min-compat-client luminous
+ ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1
+ ceph osd pool create $poolname 1 1 erasure ec-profile
+
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=${dir}/12kdata bs=1k count=12
+ for i in $(seq 1 $ecobjects)
+ do
+ rados -p $poolname put obj$i $dir/12kdata
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set noout
+ kill_daemons $dir TERM $otherosd || return 1
+
+ rmobjects=$(expr $ecobjects / 4)
+ for i in $(seq 1 $rmobjects)
+ do
+ rados -p $poolname rm obj$i
+ done
+
+ dd if=/dev/urandom of=${dir}/36kdata bs=1k count=36
+ for i in $(seq $(expr $rmobjects + 1) $ecobjects)
+ do
+ rados -p $poolname put obj$i $dir/36kdata
+ done
+
+ activate_osd $dir $otherosd || return 1
+
+ ceph tell osd.$primary debug kick_recovery_wq 0
+
+ sleep 2
+
+ wait_for_clean || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+main osd-backfill-space "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-space.sh"
+# End:
diff --git a/qa/standalone/osd/osd-backfill-stats.sh b/qa/standalone/osd/osd-backfill-stats.sh
new file mode 100755
index 00000000..104533e7
--- /dev/null
+++ b/qa/standalone/osd/osd-backfill-stats.sh
@@ -0,0 +1,753 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 "
+ export margin=10
+ export objects=200
+ export poolname=test
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
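+# below_margin/above_margin succeed when check is within $margin of target:
+# at or below the target for below_margin, at or above it for above_margin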
+function below_margin() {
+ local -i check=$1
+ shift
+ local -i target=$1
+
+ return $(( $check <= $target && $check >= $target - $margin ? 0 : 1 ))
+}
+
+function above_margin() {
+ local -i check=$1
+ shift
+ local -i target=$1
+
+ return $(( $check >= $target && $check <= $target + $margin ? 0 : 1 ))
+}
+
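+# Command templates: the single quotes keep $log, $PG, $which, $UPACT and $addp
+# unexpanded until the templates are run via eval inside check() below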
+FIND_UPACT='grep "pg[[]${PG}.*backfilling.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"'
+FIND_FIRST='grep "pg[[]${PG}.*backfilling.*_update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"'
+FIND_LAST='grep "pg[[]${PG}.*backfilling.*_update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"'
+
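+# check: scan the backfill primary's log for _update_calc_stats entries and verify
+# that the degraded/misplaced (and optionally missing-on-primary shard) counters start
+# within $margin of the expected start values and end within $margin of the end values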
+function check() {
+ local dir=$1
+ local PG=$2
+ local primary=$3
+ local type=$4
+ local degraded_start=$5
+ local degraded_end=$6
+ local misplaced_start=$7
+ local misplaced_end=$8
+ local primary_start=${9:-}
+ local primary_end=${10:-}
+ local check_setup=${11:-true}
+
+ local log=$(grep -l +backfilling $dir/osd.$primary.log)
+ if [ $check_setup = "true" ];
+ then
+ local alllogs=$(grep -l +backfilling $dir/osd.*.log)
+ if [ "$(echo "$alllogs" | wc -w)" != "1" ];
+ then
+ echo "Test setup failure, a single OSD should have performed backfill"
+ return 1
+ fi
+ fi
+
+ local addp=" "
+ if [ "$type" = "erasure" ];
+ then
+ addp="p"
+ fi
+
+ UPACT=$(eval $FIND_UPACT)
+ [ -n "$UPACT" ] || return 1
+
+ # Check 3rd line at start because of false recovery starts
+ local which="degraded"
+ FIRST=$(eval $FIND_FIRST)
+ [ -n "$FIRST" ] || return 1
+ below_margin $FIRST $degraded_start || return 1
+ LAST=$(eval $FIND_LAST)
+ [ -n "$LAST" ] || return 1
+ above_margin $LAST $degraded_end || return 1
+
+ # Check 3rd line at start because of false recovery starts
+ which="misplaced"
+ FIRST=$(eval $FIND_FIRST)
+ [ -n "$FIRST" ] || return 1
+ below_margin $FIRST $misplaced_start || return 1
+ LAST=$(eval $FIND_LAST)
+ [ -n "$LAST" ] || return 1
+ above_margin $LAST $misplaced_end || return 1
+
+    # This is the value set into MISSING_ON_PRIMARY
+ if [ -n "$primary_start" ];
+ then
+ which="shard $primary"
+ FIRST=$(eval $FIND_FIRST)
+ [ -n "$FIRST" ] || return 1
+ below_margin $FIRST $primary_start || return 1
+ LAST=$(eval $FIND_LAST)
+ [ -n "$LAST" ] || return 1
+ above_margin $LAST $primary_end || return 1
+ fi
+}
+
+# [1] -> [1, 0, 2]
+# degraded 1000 -> 0
+# state: active+undersized+degraded+remapped+backfilling
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 1000 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:44:23.531466 22'500 26:617 [1,0,2] 1 [1] 1 0'0 2017-10-27 09:43:44.654882 0'0 2017-10-27 09:43:44.654882
+function TEST_backfill_sizeup() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 1
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ ceph osd pool set $poolname size 3
+ sleep 15
+
+ wait_for_clean || return 1
+
+ local primary=$(get_primary $poolname obj1)
+ local PG=$(get_pg $poolname obj1)
+
+ local degraded=$(expr $objects \* 2)
+ check $dir $PG $primary replicated $degraded 0 0 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+
+# [1] -> [0, 2, 4]
+# degraded 1000 -> 0
+# misplaced 500 -> 0
+# state: active+undersized+degraded+remapped+backfilling
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 1000 500 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:48:53.326849 22'500 26:603 [0,2,4] 0 [1] 1 0'0 2017-10-27 09:48:13.236253 0'0 2017-10-27 09:48:13.236253
+function TEST_backfill_sizeup_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 1
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+
+ ceph osd out osd.$primary
+ ceph osd pool set $poolname size 3
+ sleep 15
+
+ wait_for_clean || return 1
+
+ local degraded=$(expr $objects \* 2)
+ check $dir $PG $primary replicated $degraded 0 $objects 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [1 0] -> [1,2]/[1,0]
+# misplaced 500 -> 0
+# state: active+remapped+backfilling
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 0 500 0 0 100 100 active+remapped+backfilling 2017-10-27 09:51:18.800517 22'500 25:570 [1,2] 1 [1,0] 1 0'0 2017-10-27 09:50:40.441274 0'0 2017-10-27 09:50:40.441274
+function TEST_backfill_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 2
+ sleep 5
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+
+ ceph osd out osd.$(get_not_primary $poolname obj1)
+ sleep 15
+
+ wait_for_clean || return 1
+
+ check $dir $PG $primary replicated 0 0 $objects 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [0, 1] -> [0, 2]/[0]
+# osd 1 down/out
+# degraded 500 -> 0
+# state: active+undersized+degraded+remapped+backfilling
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 500 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:53:24.051091 22'500 27:719 [0,2] 0 [0] 0 0'0 2017-10-27 09:52:43.188368 0'0 2017-10-27 09:52:43.188368
+function TEST_backfill_down_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 2
+ sleep 5
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ kill $(cat $dir/osd.${otherosd}.pid)
+ ceph osd down osd.${otherosd}
+ ceph osd out osd.${otherosd}
+ sleep 15
+
+ wait_for_clean || return 1
+
+ check $dir $PG $primary replicated $objects 0 0 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [1, 0] -> [2, 3, 4]
+# degraded 500 -> 0
+# misplaced 1000 -> 0
+# state: active+undersized+degraded+remapped+backfilling
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 500 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:55:50.375722 23'500 27:553 [2,4,3] 2 [1,0] 1 0'0 2017-10-27 09:55:10.230919 0'0 2017-10-27 09:55:10.230919
+function TEST_backfill_out2() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 2
+ sleep 5
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set nobackfill
+ ceph osd pool set $poolname size 3
+ ceph osd out osd.${otherosd}
+ ceph osd out osd.${primary}
+ # Primary might change before backfill starts
+ sleep 2
+ primary=$(get_primary $poolname obj1)
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local misplaced=$(expr $objects \* 2)
+
+ check $dir $PG $primary replicated $objects 0 $misplaced 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [0,1] -> [2,4,3]/[0,1]
+# degraded 1000 -> 0
+# misplaced 1000 -> 500
+# state ends at active+clean+remapped [2,4,3]/[2,4,3,0]
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 1000 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-30 18:21:45.995149 19'500 23:1817 [2,4,3] 2 [0,1] 0 0'0 2017-10-30 18:21:05.109904 0'0 2017-10-30 18:21:05.109904
+# ENDS:
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 0 500 0 0 5 5 active+clean+remapped 2017-10-30 18:22:42.293730 19'500 25:2557 [2,4,3] 2 [2,4,3,0] 2 0'0 2017-10-30 18:21:05.109904 0'0 2017-10-30 18:21:05.109904
+function TEST_backfill_sizeup4_allout() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 2
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set nobackfill
+ ceph osd out osd.$otherosd
+ ceph osd out osd.$primary
+ ceph osd pool set $poolname size 4
+ # Primary might change before backfill starts
+ sleep 2
+ primary=$(get_primary $poolname obj1)
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local misdeg=$(expr $objects \* 2)
+ check $dir $PG $primary replicated $misdeg 0 $misdeg $objects || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [1,2,0] -> [3]/[1,2]
+# misplaced 1000 -> 500
+# state ends at active+clean+remapped [3]/[3,1]
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 0 1000 0 0 100 100 active+remapped+backfilling 2017-11-28 19:13:56.092439 21'500 31:790 [3] 3 [1,2] 1 0'0 2017-11-28 19:13:28.698661 0'0 2017-11-28 19:13:28.698661
+function TEST_backfill_remapped() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 3
+ sleep 5
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set nobackfill
+ ceph osd out osd.${otherosd}
+ for i in $(get_osds $poolname obj1)
+ do
+ if [ $i = $primary -o $i = $otherosd ];
+ then
+ continue
+ fi
+ ceph osd out osd.$i
+ break
+ done
+ ceph osd out osd.${primary}
+ ceph osd pool set $poolname size 2
+ sleep 2
+
+ # primary may change due to invalidating the old pg_temp, which was [1,2,0],
+ # but up_primary (3) chooses [0,1] for acting.
+ primary=$(get_primary $poolname obj1)
+
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local misplaced=$(expr $objects \* 2)
+
+ check $dir $PG $primary replicated 0 0 $misplaced $objects "" "" false || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# [1,0,2] -> [4,3,NONE]/[1,0,2]
+# misplaced 1500 -> 500
+# state ends at active+clean+remapped [4,3,NONE]/[4,3,2]
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 0 1500 0 0 100 100 active+degraded+remapped+backfilling 2017-10-31 16:53:39.467126 19'500 23:615 [4,3,NONE] 4 [1,0,2] 1 0'0 2017-10-31 16:52:59.624429 0'0 2017-10-31 16:52:59.624429
+
+
+# ENDS:
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 0 500 0 0 5 5 active+clean+remapped 2017-10-31 16:48:34.414040 19'500 25:2049 [4,3,NONE] 4 [4,3,2] 4 0'0 2017-10-31 16:46:58.203440 0'0 2017-10-31 16:46:58.203440
+function TEST_backfill_ec_all_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
+ create_pool $poolname 1 1 erasure myprofile
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+
+ ceph osd set nobackfill
+ for o in $(get_osds $poolname obj1)
+ do
+ ceph osd out osd.$o
+ done
+ # Primary might change before backfill starts
+ sleep 2
+ primary=$(get_primary $poolname obj1)
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local misplaced=$(expr $objects \* 3)
+ check $dir $PG $primary erasure 0 0 $misplaced $objects || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [1,0,2] -> [4, 0, 2]
+# misplaced 500 -> 0
+# active+remapped+backfilling
+#
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 0 500 0 0 100 100 active+remapped+backfilling 2017-11-08 18:05:39.036420 24'500 27:742 [4,0,2] 4 [1,0,2] 1 0'0 2017-11-08 18:04:58.697315 0'0 2017-11-08 18:04:58.697315
+function TEST_backfill_ec_prim_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
+ create_pool $poolname 1 1 erasure myprofile
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+
+ ceph osd set nobackfill
+ ceph osd out osd.$primary
+ # Primary might change before backfill starts
+ sleep 2
+ primary=$(get_primary $poolname obj1)
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local misplaced=$(expr $objects \* 3)
+ check $dir $PG $primary erasure 0 0 $objects 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# [1,0] -> [1,2]
+# degraded 500 -> 0
+# misplaced 1000 -> 0
+#
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 500 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-11-06 14:02:29.439105 24'500 29:1020 [4,3,5] 4 [1,NONE,2] 1 0'0 2017-11-06 14:01:46.509963 0'0 2017-11-06 14:01:46.509963
+function TEST_backfill_ec_down_all_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
+ create_pool $poolname 1 1 erasure myprofile
+ ceph osd pool set $poolname min_size 2
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+ local allosds=$(get_osds $poolname obj1)
+
+ ceph osd set nobackfill
+ kill $(cat $dir/osd.${otherosd}.pid)
+ ceph osd down osd.${otherosd}
+ for o in $allosds
+ do
+ ceph osd out osd.$o
+ done
+ # Primary might change before backfill starts
+ sleep 2
+ primary=$(get_primary $poolname obj1)
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+ sleep 2
+ flush_pg_stats
+
+ # Wait for recovery to finish
+ # Can't use wait_for_clean() because state goes from active+undersized+degraded+remapped+backfilling
+ # to active+undersized+remapped
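+    # First loop until no PG reports "incomplete", then give $PG up to 60s to leave "backfilling"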
+ while(true)
+ do
+ if test "$(ceph --format json pg dump pgs |
+ jq '.pg_stats | [.[] | .state | select(. == "incomplete")] | length')" -ne "0"
+ then
+ sleep 2
+ continue
+ fi
+ break
+ done
+ ceph pg dump pgs
+ for i in $(seq 1 60)
+ do
+ if ceph pg dump pgs | grep ^$PG | grep -qv backfilling
+ then
+ break
+ fi
+ if [ $i = "60" ];
+ then
+ echo "Timeout waiting for recovery to finish"
+ return 1
+ fi
+ sleep 1
+ done
+
+ ceph pg dump pgs
+
+ local misplaced=$(expr $objects \* 2)
+ check $dir $PG $primary erasure $objects 0 $misplaced 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+# [1,0,2] -> [1,3,2]
+# degraded 500 -> 0
+# active+backfilling+degraded
+#
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 500 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-11-06 13:57:25.412322 22'500 28:794 [1,3,2] 1 [1,NONE,2] 1 0'0 2017-11-06 13:54:58.033906 0'0 2017-11-06 13:54:58.033906
+function TEST_backfill_ec_down_out() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
+ create_pool $poolname 1 1 erasure myprofile
+ ceph osd pool set $poolname min_size 2
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ # Remember primary during the backfill
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set nobackfill
+ kill $(cat $dir/osd.${otherosd}.pid)
+ ceph osd down osd.${otherosd}
+ ceph osd out osd.${otherosd}
+ # Primary might change before backfill starts
+ sleep 2
+ primary=$(get_primary $poolname obj1)
+ ceph osd unset nobackfill
+ ceph tell osd.$primary get_latest_osdmap
+ ceph tell osd.$primary debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local misplaced=$(expr $objects \* 2)
+ check $dir $PG $primary erasure $objects 0 0 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+
+main osd-backfill-stats "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-stats.sh"
+# End:
diff --git a/qa/standalone/osd/osd-bench.sh b/qa/standalone/osd/osd-bench.sh
new file mode 100755
index 00000000..5bcbe377
--- /dev/null
+++ b/qa/standalone/osd/osd-bench.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7106" # git grep '\<7106\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_bench() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ local osd_bench_small_size_max_iops=$(CEPH_ARGS='' ceph-conf \
+ --show-config-value osd_bench_small_size_max_iops)
+ local osd_bench_large_size_max_throughput=$(CEPH_ARGS='' ceph-conf \
+ --show-config-value osd_bench_large_size_max_throughput)
+ local osd_bench_max_block_size=$(CEPH_ARGS='' ceph-conf \
+ --show-config-value osd_bench_max_block_size)
+ local osd_bench_duration=$(CEPH_ARGS='' ceph-conf \
+ --show-config-value osd_bench_duration)
+
+ #
+ # block size too high
+ #
+ expect_failure $dir osd_bench_max_block_size \
+ ceph tell osd.0 bench 1024 $((osd_bench_max_block_size + 1)) || return 1
+
+ #
+ # count too high for small (< 1MB) block sizes
+ #
+ local bsize=1024
+ local max_count=$(($bsize * $osd_bench_duration * $osd_bench_small_size_max_iops))
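+    # the cap is block size * bench duration * small-size IOPS limit; one byte over must fail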
+ expect_failure $dir bench_small_size_max_iops \
+ ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
+
+ #
+ # count too high for large (>= 1MB) block sizes
+ #
+ local bsize=$((1024 * 1024 + 1))
+ local max_count=$(($osd_bench_large_size_max_throughput * $osd_bench_duration))
+ expect_failure $dir osd_bench_large_size_max_throughput \
+ ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1
+
+ #
+ # default values should work
+ #
+ ceph tell osd.0 bench || return 1
+
+ #
+    # test object_size < block_size
+    #
+    ceph tell osd.0 bench 10 14456 4444 3
+
+ #
+    # test object_size < block_size & object_size = 0 (default value)
+ #
+ ceph tell osd.0 bench 1 14456
+}
+
+main osd-bench "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh"
+# End:
diff --git a/qa/standalone/osd/osd-bluefs-volume-ops.sh b/qa/standalone/osd/osd-bluefs-volume-ops.sh
new file mode 100755
index 00000000..1c9c5cf2
--- /dev/null
+++ b/qa/standalone/osd/osd-bluefs-volume-ops.sh
@@ -0,0 +1,346 @@
+#!/usr/bin/env bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+[ `uname` = FreeBSD ] && exit 0
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ CEPH_ARGS+="--bluestore_block_size=2147483648 "
+ CEPH_ARGS+="--bluestore_block_db_create=true "
+ CEPH_ARGS+="--bluestore_block_db_size=1073741824 "
+ CEPH_ARGS+="--bluestore_block_wal_size=536870912 "
+ CEPH_ARGS+="--bluestore_bluefs_min=536870912 "
+ CEPH_ARGS+="--bluestore_bluefs_min_free=536870912 "
+ CEPH_ARGS+="--bluestore_block_wal_create=true "
+ CEPH_ARGS+="--bluestore_fsck_on_mount=true "
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_bluestore() {
+ local dir=$1
+
+ local flimit=$(ulimit -n)
+ if [ $flimit -lt 1536 ]; then
+ echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
+ fi
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ osd_pid0=$(cat $dir/osd.0.pid)
+ run_osd $dir 1 || return 1
+ osd_pid1=$(cat $dir/osd.1.pid)
+ run_osd $dir 2 || return 1
+ osd_pid2=$(cat $dir/osd.2.pid)
+ run_osd $dir 3 || return 1
+ osd_pid3=$(cat $dir/osd.3.pid)
+
+ sleep 5
+
+ create_pool foo 16
+
+ # write some objects
+ timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
+
+ echo "after bench"
+
+ # kill
+ while kill $osd_pid0; do sleep 1 ; done
+ ceph osd down 0
+ while kill $osd_pid1; do sleep 1 ; done
+ ceph osd down 1
+ while kill $osd_pid2; do sleep 1 ; done
+ ceph osd down 2
+ while kill $osd_pid3; do sleep 1 ; done
+ ceph osd down 3
+
+ # expand slow devices
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ truncate $dir/0/block -s 4294967296 # 4GB
+ ceph-bluestore-tool --path $dir/0 bluefs-bdev-expand || return 1
+ truncate $dir/1/block -s 4311744512 # 4GB + 16MB
+ ceph-bluestore-tool --path $dir/1 bluefs-bdev-expand || return 1
+ truncate $dir/2/block -s 4295099392 # 4GB + 129KB
+ ceph-bluestore-tool --path $dir/2 bluefs-bdev-expand || return 1
+ truncate $dir/3/block -s 4293918720 # 4GB - 1MB
+ ceph-bluestore-tool --path $dir/3 bluefs-bdev-expand || return 1
+
+ # slow, DB, WAL -> slow, DB
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ ceph-bluestore-tool --path $dir/0 bluefs-bdev-sizes
+
+ ceph-bluestore-tool --path $dir/0 \
+ --devs-source $dir/0/block.wal \
+ --dev-target $dir/0/block.db \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+
+ # slow, DB, WAL -> slow, WAL
+ ceph-bluestore-tool --path $dir/1 \
+ --devs-source $dir/1/block.db \
+ --dev-target $dir/1/block \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+
+ # slow, DB, WAL -> slow
+ ceph-bluestore-tool --path $dir/2 \
+ --devs-source $dir/2/block.wal \
+ --devs-source $dir/2/block.db \
+ --dev-target $dir/2/block \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+
+ # slow, DB, WAL -> slow, WAL (negative case)
+ ceph-bluestore-tool --path $dir/3 \
+ --devs-source $dir/3/block.db \
+ --dev-target $dir/3/block.wal \
+ --command bluefs-bdev-migrate
+
+ # Migration to WAL is unsupported
+ if [ $? -eq 0 ]; then
+ return 1
+ fi
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ # slow, DB, WAL -> slow, DB (WAL to slow then slow to DB)
+ ceph-bluestore-tool --path $dir/3 \
+ --devs-source $dir/3/block.wal \
+ --dev-target $dir/3/block \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ ceph-bluestore-tool --path $dir/3 \
+ --devs-source $dir/3/block \
+ --dev-target $dir/3/block.db \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ activate_osd $dir 0 || return 1
+ osd_pid0=$(cat $dir/osd.0.pid)
+ activate_osd $dir 1 || return 1
+ osd_pid1=$(cat $dir/osd.1.pid)
+ activate_osd $dir 2 || return 1
+ osd_pid2=$(cat $dir/osd.2.pid)
+ activate_osd $dir 3 || return 1
+ osd_pid3=$(cat $dir/osd.3.pid)
+
+ wait_for_clean || return 1
+
+ # write some objects
+ timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
+
+ # kill
+ while kill $osd_pid0; do sleep 1 ; done
+ ceph osd down 0
+ while kill $osd_pid1; do sleep 1 ; done
+ ceph osd down 1
+ while kill $osd_pid2; do sleep 1 ; done
+ ceph osd down 2
+ while kill $osd_pid3; do sleep 1 ; done
+ ceph osd down 3
+
+ # slow, DB -> slow, DB, WAL
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+
+ dd if=/dev/zero of=$dir/0/wal count=512 bs=1M
+ ceph-bluestore-tool --path $dir/0 \
+ --dev-target $dir/0/wal \
+ --command bluefs-bdev-new-wal || return 1
+
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+
+ # slow, WAL -> slow, DB, WAL
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+
+ dd if=/dev/zero of=$dir/1/db count=1024 bs=1M
+ ceph-bluestore-tool --path $dir/1 \
+ --dev-target $dir/1/db \
+ --command bluefs-bdev-new-db || return 1
+
+ ceph-bluestore-tool --path $dir/1 \
+ --devs-source $dir/1/block \
+ --dev-target $dir/1/block.db \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+
+ # slow -> slow, DB, WAL
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+
+ ceph-bluestore-tool --path $dir/2 \
+ --command bluefs-bdev-new-db || return 1
+
+ ceph-bluestore-tool --path $dir/2 \
+ --command bluefs-bdev-new-wal || return 1
+
+ ceph-bluestore-tool --path $dir/2 \
+ --devs-source $dir/2/block \
+ --dev-target $dir/2/block.db \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+
+ # slow, DB -> slow, WAL
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ ceph-bluestore-tool --path $dir/3 \
+ --command bluefs-bdev-new-wal || return 1
+
+ ceph-bluestore-tool --path $dir/3 \
+ --devs-source $dir/3/block.db \
+ --dev-target $dir/3/block \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ activate_osd $dir 0 || return 1
+ osd_pid0=$(cat $dir/osd.0.pid)
+ activate_osd $dir 1 || return 1
+ osd_pid1=$(cat $dir/osd.1.pid)
+ activate_osd $dir 2 || return 1
+ osd_pid2=$(cat $dir/osd.2.pid)
+ activate_osd $dir 3 || return 1
+ osd_pid3=$(cat $dir/osd.3.pid)
+
+ # write some objects
+ timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
+
+ # kill
+ while kill $osd_pid0; do sleep 1 ; done
+ ceph osd down 0
+ while kill $osd_pid1; do sleep 1 ; done
+ ceph osd down 1
+ while kill $osd_pid2; do sleep 1 ; done
+ ceph osd down 2
+ while kill $osd_pid3; do sleep 1 ; done
+ ceph osd down 3
+
+ # slow, DB1, WAL -> slow, DB2, WAL
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+
+ dd if=/dev/zero of=$dir/0/db2 count=1024 bs=1M
+ ceph-bluestore-tool --path $dir/0 \
+ --devs-source $dir/0/block.db \
+ --dev-target $dir/0/db2 \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+
+ # slow, DB, WAL1 -> slow, DB, WAL2
+
+ dd if=/dev/zero of=$dir/0/wal2 count=512 bs=1M
+ ceph-bluestore-tool --path $dir/0 \
+ --devs-source $dir/0/block.wal \
+ --dev-target $dir/0/wal2 \
+ --command bluefs-bdev-migrate || return 1
+ rm -rf $dir/0/wal
+
+ ceph-bluestore-tool --path $dir/0 fsck || return 1
+
+ # slow, DB + WAL -> slow, DB2 -> slow
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+
+ dd if=/dev/zero of=$dir/1/db2 count=1024 bs=1M
+ ceph-bluestore-tool --path $dir/1 \
+ --devs-source $dir/1/block.db \
+ --devs-source $dir/1/block.wal \
+ --dev-target $dir/1/db2 \
+ --command bluefs-bdev-migrate || return 1
+
+ rm -rf $dir/1/db
+
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+
+ ceph-bluestore-tool --path $dir/1 \
+ --devs-source $dir/1/block.db \
+ --dev-target $dir/1/block \
+ --command bluefs-bdev-migrate || return 1
+
+ rm -rf $dir/1/db2
+
+ ceph-bluestore-tool --path $dir/1 fsck || return 1
+
+ # slow -> slow, DB (negative case)
+ ceph-objectstore-tool --type bluestore --data-path $dir/2 \
+ --op fsck --no-mon-config || return 1
+
+ dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
+ ceph-bluestore-tool --path $dir/2 \
+ --devs-source $dir/2/block \
+ --dev-target $dir/2/db2 \
+ --command bluefs-bdev-migrate
+
+ # Migration from slow-only to new device is unsupported
+ if [ $? -eq 0 ]; then
+ return 1
+ fi
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+
+ # slow + DB + WAL -> slow, DB2
+ dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M
+
+ ceph-bluestore-tool --path $dir/2 \
+ --devs-source $dir/2/block \
+ --devs-source $dir/2/block.db \
+ --devs-source $dir/2/block.wal \
+ --dev-target $dir/2/db2 \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/2 fsck || return 1
+
+ # slow + WAL -> slow2, WAL2
+ dd if=/dev/zero of=$dir/3/wal2 count=1024 bs=1M
+
+ ceph-bluestore-tool --path $dir/3 \
+ --devs-source $dir/3/block \
+ --devs-source $dir/3/block.wal \
+ --dev-target $dir/3/wal2 \
+ --command bluefs-bdev-migrate || return 1
+
+ ceph-bluestore-tool --path $dir/3 fsck || return 1
+
+ activate_osd $dir 0 || return 1
+ osd_pid0=$(cat $dir/osd.0.pid)
+ activate_osd $dir 1 || return 1
+ osd_pid1=$(cat $dir/osd.1.pid)
+ activate_osd $dir 2 || return 1
+ osd_pid2=$(cat $dir/osd.2.pid)
+ activate_osd $dir 3 || return 1
+ osd_pid3=$(cat $dir/osd.3.pid)
+
+ # write some objects
+ timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1
+
+ wait_for_clean || return 1
+}
+
+main osd-bluefs-volume-ops "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bluefs-volume-ops.sh"
+# End:
diff --git a/qa/standalone/osd/osd-config.sh b/qa/standalone/osd/osd-config.sh
new file mode 100755
index 00000000..126c2f7d
--- /dev/null
+++ b/qa/standalone/osd/osd-config.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7100" # git grep '\<7100\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_config_init() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ local stale=1000
+ local cache=500
+ run_osd $dir 0 \
+ --osd-map-cache-size=$cache \
+ --osd-pg-epoch-persisted-max-stale=$stale \
+ || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
+ grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
+}
+
+function TEST_config_track() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ local osd_map_cache_size=$(CEPH_ARGS='' ceph-conf \
+ --show-config-value osd_map_cache_size)
+ local osd_pg_epoch_persisted_max_stale=$(CEPH_ARGS='' ceph-conf \
+ --show-config-value osd_pg_epoch_persisted_max_stale)
+
+ #
+ # increase the osd_pg_epoch_persisted_max_stale above the default cache_size
+ #
+ ! grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
+ local stale=$(($osd_map_cache_size * 2))
+ ceph tell osd.0 injectargs "--osd-pg-epoch-persisted-max-stale $stale" || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1
+ grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1
+ rm $dir/osd.0.log
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1
+}
+
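+# --default-<option> should only replace the compiled-in default: an option
+# that was never set falls back to the new default, while an explicit setting
+# (e.g. --log-to-file) still wins over both, as the checks below exercise.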
+function TEST_default_adjustment() {
+ a=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin)
+ b=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin --default-rgw-torrent-origin default)
+ c=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin --default-rgw-torrent-origin arg)
+ [ "$a" != "default" ] || return 1
+ [ "$b" = "default" ] || return 1
+ [ "$c" = "arg" ] || return 1
+
+ a=$(ceph-osd --no-mon-config --show-config-value log_to_file)
+ b=$(ceph-osd --no-mon-config --show-config-value log_to_file --default-log-to-file=false)
+ c=$(ceph-osd --no-mon-config --show-config-value log_to_file --default-log-to-file=false --log-to-file)
+ [ "$a" = "true" ] || return 1
+ [ "$b" = "false" ] || return 1
+ [ "$c" = "true" ] || return 1
+}
+
+main osd-config "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-config.sh"
+# End:
diff --git a/qa/standalone/osd/osd-copy-from.sh b/qa/standalone/osd/osd-copy-from.sh
new file mode 100755
index 00000000..8ac0ab54
--- /dev/null
+++ b/qa/standalone/osd/osd-copy-from.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+# Author: Sage Weil <sage@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7111" # git grep '\<7111\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_copy_from() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ create_rbd_pool || return 1
+
+ # success
+ rados -p rbd put foo $(which rados)
+ rados -p rbd cp foo foo2
+ rados -p rbd stat foo2
+
+ # failure
+ ceph tell osd.\* injectargs -- --osd-debug-inject-copyfrom-error
+ ! rados -p rbd cp foo foo3
+ ! rados -p rbd stat foo3
+
+ # success again
+ ceph tell osd.\* injectargs -- --no-osd-debug-inject-copyfrom-error
+    rados -p rbd cp foo foo3
+ rados -p rbd stat foo3
+}
+
+main osd-copy-from "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-copy-from.sh"
+# End:
diff --git a/qa/standalone/osd/osd-dup.sh b/qa/standalone/osd/osd-dup.sh
new file mode 100755
index 00000000..fdb2649c
--- /dev/null
+++ b/qa/standalone/osd/osd-dup.sh
@@ -0,0 +1,83 @@
+#!/usr/bin/env bash
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+[ `uname` = FreeBSD ] && exit 0
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ # avoid running out of fds in rados bench
+ CEPH_ARGS+="--filestore_wbthrottle_xfs_ios_hard_limit=900 "
+ CEPH_ARGS+="--filestore_wbthrottle_btrfs_ios_hard_limit=900 "
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_filestore_to_bluestore() {
+ local dir=$1
+
+ local flimit=$(ulimit -n)
+ if [ $flimit -lt 1536 ]; then
+ echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens."
+ fi
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd_filestore $dir 0 || return 1
+ osd_pid=$(cat $dir/osd.0.pid)
+ run_osd_filestore $dir 1 || return 1
+ run_osd_filestore $dir 2 || return 1
+
+ sleep 5
+
+ create_pool foo 16
+
+ # write some objects
+ timeout 20 rados bench -p foo 10 write -b 4096 --no-cleanup || return 1
+
+ # kill
+ while kill $osd_pid; do sleep 1 ; done
+ ceph osd down 0
+
+ mv $dir/0 $dir/0.old || return 1
+ mkdir $dir/0 || return 1
+ ofsid=$(cat $dir/0.old/fsid)
+ echo "osd fsid $ofsid"
+ O=$CEPH_ARGS
+ CEPH_ARGS+="--log-file $dir/cot.log --log-max-recent 0 "
+ ceph-objectstore-tool --type bluestore --data-path $dir/0 --fsid $ofsid \
+ --op mkfs --no-mon-config || return 1
+ ceph-objectstore-tool --data-path $dir/0.old --target-data-path $dir/0 \
+ --op dup || return 1
+ CEPH_ARGS=$O
+
+ activate_osd $dir 0 || return 1
+
+ while ! ceph osd stat | grep '3 up' ; do sleep 1 ; done
+ ceph osd metadata 0 | grep bluestore || return 1
+
+ ceph osd scrub 0
+
+ # give it some time
+ sleep 15
+ # and make sure mon is sync'ed
+ flush_pg_stats
+
+ wait_for_clean || return 1
+}
+
+main osd-dup "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-dup.sh"
+# End:
diff --git a/qa/standalone/osd/osd-fast-mark-down.sh b/qa/standalone/osd/osd-fast-mark-down.sh
new file mode 100755
index 00000000..cf5851c2
--- /dev/null
+++ b/qa/standalone/osd/osd-fast-mark-down.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2016 Piotr Dałek <git@predictor.org.pl>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Piotr Dałek <git@predictor.org.pl>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+MAX_PROPAGATION_TIME=30
+
+function run() {
+ local dir=$1
+ shift
+ rm -f $dir/*.pid
+ export CEPH_MON="127.0.0.1:7126" # git grep '\<7126\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+
+ OLD_ARGS=$CEPH_ARGS
+ CEPH_ARGS+="--osd-fast-fail-on-connection-refused=false "
+ echo "Ensuring old behavior is there..."
+    test_fast_kill $dir && { echo "OSDs died too early! Old behavior doesn't work." ; return 1 ; }
+
+ CEPH_ARGS=$OLD_ARGS"--osd-fast-fail-on-connection-refused=true "
+ OLD_ARGS=$CEPH_ARGS
+
+ # force v1 addr here for simple's benefit
+ CEPH_ARGS+="--ms_type=simple --mon-host=v1:$CEPH_MON"
+ echo "Testing simple msgr..."
+ test_fast_kill $dir || return 1
+
+ CEPH_ARGS=$OLD_ARGS"--ms_type=async --mon-host=$CEPH_MON"
+ echo "Testing async msgr..."
+ test_fast_kill $dir || return 1
+
+ return 0
+
+}
+
+function test_fast_kill() {
+ # create cluster with 3 osds
+ setup $dir || return 1
+ run_mon $dir a --osd_pool_default_size=3 || return 1
+ run_mgr $dir x || return 1
+ for oi in {0..2}; do
+ run_osd $dir $oi || return 1
+ pids[$oi]=$(cat $dir/osd.$oi.pid)
+ done
+
+ create_rbd_pool || return 1
+
+    # write some objects to ensure there is traffic/connectivity between the osds
+ timeout 20 rados -p rbd bench 10 write -b 4096 --max-objects 128 --no-cleanup || return 1
+ sleep 1
+
+ killid=0
+ previd=0
+
+ # kill random osd and see if after max MAX_PROPAGATION_TIME, the osd count decreased.
+ for i in {1..2}; do
+ while [ $killid -eq $previd ]; do
+ killid=${pids[$RANDOM%${#pids[@]}]}
+ done
+ previd=$killid
+
+ kill -9 $killid
+ time_left=$MAX_PROPAGATION_TIME
+ down_osds=0
+
+ while [ $time_left -gt 0 ]; do
+ sleep 1
+ time_left=$[$time_left - 1];
+
+ grep -m 1 -c -F "ms_handle_refused" $dir/osd.*.log > /dev/null
+ if [ $? -ne 0 ]; then
+ continue
+ fi
+
+ down_osds=$(ceph osd tree | grep -c down)
+ if [ $down_osds -lt $i ]; then
+ # osds not marked down yet, try again in a second
+ continue
+ elif [ $down_osds -gt $i ]; then
+ echo Too many \($down_osds\) osds died!
+ return 1
+ else
+ break
+ fi
+ done
+
+ if [ $down_osds -lt $i ]; then
+ echo Killed the OSD, yet it is not marked down
+ ceph osd tree
+ return 1
+ fi
+ done
+ pkill -SIGTERM rados
+ teardown $dir || return 1
+}
+
+main osd-fast-mark-down "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-fast-mark-down.sh"
+# End:
diff --git a/qa/standalone/osd/osd-force-create-pg.sh b/qa/standalone/osd/osd-force-create-pg.sh
new file mode 100755
index 00000000..f70caac1
--- /dev/null
+++ b/qa/standalone/osd/osd-force-create-pg.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7145" # git grep '\<7145\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ $func $dir || return 1
+ done
+}
+
+function TEST_force_create_pg() {
+ local dir=$1
+
+ setup $dir || return 1
+ run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ ceph osd pool create foo 50 || return 1
+ wait_for_clean || return 1
+
+ kill_daemons $dir TERM osd.0
+ kill_daemons $dir TERM osd.1
+ kill_daemons $dir TERM osd.2
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force
+ ceph-objectstore-tool --data-path $dir/1 --op remove --pgid 1.0 --force
+ ceph-objectstore-tool --data-path $dir/2 --op remove --pgid 1.0 --force
+ activate_osd $dir 0 || return 1
+ activate_osd $dir 1 || return 1
+ activate_osd $dir 2 || return 1
+ sleep 10
+ ceph pg ls | grep 1.0 | grep stale || return 1
+
+ ceph osd force-create-pg 1.0 --yes-i-really-mean-it || return 1
+ wait_for_clean || return 1
+}
+
+main osd-force-create-pg "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-force-create-pg.sh"
+# End:
diff --git a/qa/standalone/osd/osd-markdown.sh b/qa/standalone/osd/osd-markdown.sh
new file mode 100755
index 00000000..6dc1f883
--- /dev/null
+++ b/qa/standalone/osd/osd-markdown.sh
@@ -0,0 +1,131 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015 Intel <contact@intel.com>
+# Copyright (C) 2014, 2015 Red Hat <contact@redhat.com>
+#
+# Author: Xiaoxi Chen <xiaoxi.chen@intel.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7108" # git grep '\<7108\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
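+# Mark osd.0 down $1 times, sleeping $3 seconds between markdowns; $2 (the
+# configured markdown period) is passed only for readability and is not used.
+# For example, "markdown_N_impl 4 300 10" marks osd.0 down 4 times, 10s apart.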
+function markdown_N_impl() {
+ markdown_times=$1
+ total_time=$2
+ sleeptime=$3
+ for i in `seq 1 $markdown_times`
+ do
+ # check the OSD is UP
+ ceph osd tree
+ ceph osd tree | grep osd.0 |grep up || return 1
+ # mark the OSD down.
+ # override any dup setting in the environment to ensure we do this
+ # exactly once (modulo messenger failures, at least; we can't *actually*
+ # provide exactly-once semantics for mon commands).
+ ( unset CEPH_CLI_TEST_DUP_COMMAND ; ceph osd down 0 )
+ sleep $sleeptime
+ done
+}
+
+
+function TEST_markdown_exceed_maxdown_count() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ create_rbd_pool || return 1
+
+ # 3+1 times within 300s, osd should stay dead on the 4th time
+ local count=3
+ local sleeptime=10
+ local period=300
+ ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1
+ ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
+
+ markdown_N_impl $(($count+1)) $period $sleeptime
+    # marked down N+1 times, so osd.0 should stay dead
+ ceph osd tree | grep down | grep osd.0 || return 1
+}
+
+function TEST_markdown_boot() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ create_rbd_pool || return 1
+
+ # 3 times within 120s, should stay up
+ local count=3
+ local sleeptime=10
+ local period=120
+ ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1
+ ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
+
+ markdown_N_impl $count $period $sleeptime
+    # marked down N times, so osd.0 should come back up
+ sleep 15 # give osd plenty of time to notice and come back up
+ ceph osd tree | grep up | grep osd.0 || return 1
+}
+
+function TEST_markdown_boot_exceed_time() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ create_rbd_pool || return 1
+
+    # 3+1 times, but spread over ~40s, which exceeds the 20s period, so osd.0 should stay up
+ local count=3
+ local period=20
+ local sleeptime=10
+ ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1
+ ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
+
+ markdown_N_impl $(($count+1)) $period $sleeptime
+ sleep 15 # give osd plenty of time to notice and come back up
+ ceph osd tree | grep up | grep osd.0 || return 1
+}
+
+main osd-markdown "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-markdown.sh"
+# End:
diff --git a/qa/standalone/osd/osd-reactivate.sh b/qa/standalone/osd/osd-reactivate.sh
new file mode 100755
index 00000000..6d643862
--- /dev/null
+++ b/qa/standalone/osd/osd-reactivate.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+#
+# Author: Vicente Cheng <freeze.bilsted@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7122" # git grep '\<7122\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function TEST_reactivate() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ kill_daemons $dir TERM osd || return 1
+
+ ready_path=$dir"/0/ready"
+ activate_path=$dir"/0/active"
+ # trigger mkfs again
+ rm -rf $ready_path $activate_path
+ activate_osd $dir 0 || return 1
+
+}
+
+main osd-reactivate "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-reactivate.sh"
+# End:
diff --git a/qa/standalone/osd/osd-recovery-prio.sh b/qa/standalone/osd/osd-recovery-prio.sh
new file mode 100755
index 00000000..fb386e26
--- /dev/null
+++ b/qa/standalone/osd/osd-recovery-prio.sh
@@ -0,0 +1,515 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2019 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20"
+ export objects=200
+ export poolprefix=test
+ export FORCE_PRIO="255" # See OSD_RECOVERY_PRIORITY_FORCED
+ export NORMAL_PRIO="190" # See OSD_RECOVERY_PRIORITY_BASE + 10
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
+function TEST_recovery_priority() {
+ local dir=$1
+ local pools=10
+ local OSDS=5
+ local max_tries=10
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 5
+
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+    # Find 3 pools, each with a pg that has the same primary, but whose
+    # second replica is on a different osd.
+ local PG1
+ local POOLNUM1
+ local pool1
+ local chk_osd1_1
+ local chk_osd1_2
+
+ local PG2
+ local POOLNUM2
+ local pool2
+ local chk_osd2
+
+ local PG3
+ local POOLNUM3
+ local pool3
+
+ for p in $(seq 1 $pools)
+ do
+ ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
+ local test_osd1=$(head -1 $dir/acting)
+ local test_osd2=$(tail -1 $dir/acting)
+ if [ -z "$PG1" ];
+ then
+ PG1="${p}.0"
+ POOLNUM1=$p
+ pool1="${poolprefix}$p"
+ chk_osd1_1=$test_osd1
+ chk_osd1_2=$test_osd2
+ elif [ -z "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 ];
+ then
+ PG2="${p}.0"
+ POOLNUM2=$p
+ pool2="${poolprefix}$p"
+ chk_osd2=$test_osd2
+ elif [ -n "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 -a "$chk_osd2" != $test_osd2 ];
+ then
+ PG3="${p}.0"
+ POOLNUM3=$p
+ pool3="${poolprefix}$p"
+ break
+ fi
+ done
+ rm -f $dir/acting
+
+ if [ "$pool2" = "" -o "pool3" = "" ];
+ then
+ echo "Failure to find appropirate PGs"
+ return 1
+ fi
+
+ for p in $(seq 1 $pools)
+ do
+ if [ $p != $POOLNUM1 -a $p != $POOLNUM2 -a $p != $POOLNUM3 ];
+ then
+ delete_pool ${poolprefix}$p
+ fi
+ done
+
+ ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool3 size 1
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/data bs=1M count=10
+ p=1
+ for pname in $pool1 $pool2 $pool3
+ do
+ for i in $(seq 1 $objects)
+ do
+ rados -p ${pname} put obj${i}-p${p} $dir/data
+ done
+ p=$(expr $p + 1)
+ done
+
+ local otherosd=$(get_not_primary $pool1 obj1-p1)
+
+ ceph pg dump pgs
+ ERRORS=0
+
+ ceph osd set norecover
+ ceph osd set noout
+
+ # Get a pg to want to recover and quickly force it
+ # to be preempted.
+ ceph osd pool set $pool3 size 2
+ sleep 2
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+
+ # 3. Item is in progress, adjust priority with no higher priority waiting
+ for i in $(seq 1 $max_tries)
+ do
+ if ! ceph pg force-recovery $PG3 2>&1 | grep -q "doesn't require recovery"; then
+ break
+ fi
+ if [ "$i" = "$max_tries" ]; then
+ echo "ERROR: Didn't appear to be able to force-recovery"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ sleep 2
+ done
+ flush_pg_stats || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+
+ ceph osd out osd.$chk_osd1_2
+ sleep 2
+ flush_pg_stats || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+ ceph pg dump pgs
+
+ ceph osd pool set $pool2 size 2
+ sleep 2
+ flush_pg_stats || return 1
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ ceph pg dump pgs
+
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG1}\")).prio")
+ if [ "$PRIO" != "$NORMAL_PRIO" ];
+ then
+ echo "The normal PG ${PG1} doesn't have prio $NORMAL_PRIO queued waiting"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+
+ # Using eval will strip double-quotes from item
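+    # e.g. jq prints "1.0" (with quotes); after the eval, ITEM holds 1.0.
+    # (jq -r would achieve the same by emitting the raw string.)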
+ eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG3} ];
+ then
+ echo "The first force-recovery PG $PG3 didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $FORCE_PRIO ];
+ then
+ echo "The first force-recovery PG ${PG3} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # 1. Item is queued, re-queue with new priority
+ for i in $(seq 1 $max_tries)
+ do
+ if ! ceph pg force-recovery $PG2 2>&1 | grep -q "doesn't require recovery"; then
+ break
+ fi
+ if [ "$i" = "$max_tries" ]; then
+ echo "ERROR: Didn't appear to be able to force-recovery"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ sleep 2
+ done
+ sleep 2
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
+ if [ "$PRIO" != "$FORCE_PRIO" ];
+ then
+ echo "The second force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ flush_pg_stats || return 1
+
+    # 4. Item is in progress; if higher priority items are waiting, preempt the item
+ #ceph osd unset norecover
+ ceph pg cancel-force-recovery $PG3 || return 1
+ sleep 2
+ #ceph osd set norecover
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG3}\")).prio")
+ if [ "$PRIO" != "$NORMAL_PRIO" ];
+ then
+ echo "After cancel-recovery PG ${PG3} doesn't have prio $NORMAL_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+
+ eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG2} ];
+ then
+ echo "The force-recovery PG $PG2 didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $FORCE_PRIO ];
+ then
+	  echo "The force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ ceph pg cancel-force-recovery $PG2 || return 1
+ sleep 5
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1
+
+    # 2. Item is queued; re-queue and preempt because the new priority is higher than the in progress item
+ flush_pg_stats || return 1
+ ceph pg force-recovery $PG3 || return 1
+ sleep 2
+
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1
+ cat $dir/out
+ PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio")
+ if [ "$PRIO" != "$NORMAL_PRIO" ];
+ then
+ echo "After cancel-force-recovery PG ${PG3} doesn't have prio $NORMAL_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+
+ eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG3} ];
+ then
+ echo "The force-recovery PG $PG3 didn't get promoted to an in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $FORCE_PRIO ];
+ then
+	      echo "The force-recovery PG ${PG3} doesn't have prio $FORCE_PRIO"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ ceph osd unset noout
+ ceph osd unset norecover
+
+ wait_for_clean "CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations" || return 1
+
+ ceph pg dump pgs
+
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_pgstate_history
+
+ if [ $ERRORS != "0" ];
+ then
+ echo "$ERRORS error(s) found"
+ else
+ echo TEST PASSED
+ fi
+
+ delete_pool $pool1
+ delete_pool $pool2
+ delete_pool $pool3
+ kill_daemons $dir || return 1
+ return $ERRORS
+}
+
+#
+# Show that pool recovery_priority is added to recovery priority
+#
+# Create 2 pools on 2 OSDs, with different primaries
+# pool 1 with recovery_priority 1
+# pool 2 with recovery_priority 2
+#
+# Start recovery by changing the pool sizes from 1 to 2
+# Use dump_recovery_reservations to verify priorities
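+#
+# For reference, dump_recovery_reservations returns JSON of roughly this shape
+# (illustrative, trimmed):
+#   { "local_reservations":  { "in_progress": [ {"item": "1.0", "prio": 191, ...} ],
+#                              "queues": [ {"items": [ {"item": "2.0", "prio": 192, ...} ]} ] },
+#     "remote_reservations": { ... } }
+# which is what the jq expressions below pick apart.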
+function TEST_recovery_pool_priority() {
+ local dir=$1
+    local pools=3 # Don't assume the first 2 pools are exactly what we want
+ local OSDS=2
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+ sleep 5
+
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ # Find 2 pools with different primaries which
+ # means the replica must be on another osd.
+ local PG1
+ local POOLNUM1
+ local pool1
+ local chk_osd1_1
+ local chk_osd1_2
+
+ local PG2
+ local POOLNUM2
+ local pool2
+ local chk_osd2_1
+ local chk_osd2_2
+
+ for p in $(seq 1 $pools)
+ do
+ ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting
+ local test_osd1=$(head -1 $dir/acting)
+ local test_osd2=$(tail -1 $dir/acting)
+ if [ -z "$PG1" ];
+ then
+ PG1="${p}.0"
+ POOLNUM1=$p
+ pool1="${poolprefix}$p"
+ chk_osd1_1=$test_osd1
+ chk_osd1_2=$test_osd2
+ elif [ $chk_osd1_1 != $test_osd1 ];
+ then
+ PG2="${p}.0"
+ POOLNUM2=$p
+ pool2="${poolprefix}$p"
+ chk_osd2_1=$test_osd1
+ chk_osd2_2=$test_osd2
+ break
+ fi
+ done
+ rm -f $dir/acting
+
+ if [ "$pool2" = "" ];
+ then
+ echo "Failure to find appropirate PGs"
+ return 1
+ fi
+
+ for p in $(seq 1 $pools)
+ do
+ if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ];
+ then
+ delete_pool ${poolprefix}$p
+ fi
+ done
+
+ pool1_extra_prio=1
+ pool2_extra_prio=2
+ pool1_prio=$(expr $NORMAL_PRIO + $pool1_extra_prio)
+ pool2_prio=$(expr $NORMAL_PRIO + $pool2_extra_prio)
+
+ ceph osd pool set $pool1 size 1
+ ceph osd pool set $pool1 recovery_priority $pool1_extra_prio
+ ceph osd pool set $pool2 size 1
+ ceph osd pool set $pool2 recovery_priority $pool2_extra_prio
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/data bs=1M count=10
+ p=1
+ for pname in $pool1 $pool2
+ do
+ for i in $(seq 1 $objects)
+ do
+ rados -p ${pname} put obj${i}-p${p} $dir/data
+ done
+ p=$(expr $p + 1)
+ done
+
+ local otherosd=$(get_not_primary $pool1 obj1-p1)
+
+ ceph pg dump pgs
+ ERRORS=0
+
+ ceph osd pool set $pool1 size 2
+ ceph osd pool set $pool2 size 2
+ sleep 10
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out
+ echo osd.${chk_osd1_1}
+ cat $dir/dump.${chk_osd1_1}.out
+ CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_2}) dump_recovery_reservations > $dir/dump.${chk_osd1_2}.out
+ echo osd.${chk_osd1_2}
+ cat $dir/dump.${chk_osd1_2}.out
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG1} ];
+ then
+ echo "The primary PG for $pool1 didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool1_prio ];
+ then
+ echo "The primary PG ${PG1} doesn't have prio $pool1_prio"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG1} ];
+ then
+ echo "The primary PG for $pool1 didn't become the in progress item on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool1_prio ];
+ then
+ echo "The primary PG ${PG1} doesn't have prio $pool1_prio on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG2} ];
+ then
+ echo "The primary PG for $pool2 didn't become the in progress item"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool2_prio ];
+ then
+ echo "The primary PG ${PG2} doesn't have prio $pool2_prio"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ # Using eval will strip double-quotes from item
+ eval ITEM=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].item')
+ if [ "$ITEM" != ${PG2} ];
+ then
+ echo "The primary PG $PG2 didn't become the in progress item on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ else
+ PRIO=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].prio')
+ if [ "$PRIO" != $pool2_prio ];
+ then
+ echo "The primary PG ${PG2} doesn't have prio $pool2_prio on remote"
+ ERRORS=$(expr $ERRORS + 1)
+ fi
+ fi
+
+ wait_for_clean || return 1
+
+ if [ $ERRORS != "0" ];
+ then
+ echo "$ERRORS error(s) found"
+ else
+ echo TEST PASSED
+ fi
+
+ delete_pool $pool1
+ delete_pool $pool2
+ kill_daemons $dir || return 1
+ return $ERRORS
+}
+
+main osd-recovery-prio "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-prio.sh"
+# End:
diff --git a/qa/standalone/osd/osd-recovery-space.sh b/qa/standalone/osd/osd-recovery-space.sh
new file mode 100755
index 00000000..82cdf82e
--- /dev/null
+++ b/qa/standalone/osd/osd-recovery-space.sh
@@ -0,0 +1,175 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2018 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7221" # git grep '\<7221\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ CEPH_ARGS+="--osd_max_backfills=10 "
+ export objects=600
+ export poolprefix=test
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+
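+# Count PGs whose state contains the given substring, e.g. with pg states
+# ["active+clean", "active+recovery_toofull+degraded"] a call like
+# "get_num_in_state recovery_toofull" would print 1.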
+function get_num_in_state() {
+ local state=$1
+ local expression
+ expression+="select(contains(\"${state}\"))"
+ ceph --format json pg dump pgs 2>/dev/null | \
+ jq ".pg_stats | [.[] | .state | $expression] | length"
+}
+
+
+function wait_for_state() {
+ local state=$1
+ local cur_in_state
+ local -a delays=($(get_timeout_delays $2 5))
+ local -i loop=0
+
+ flush_pg_stats || return 1
+ while test $(get_num_pgs) == 0 ; do
+ sleep 1
+ done
+
+ while true ; do
+ cur_in_state=$(get_num_in_state ${state})
+ test $cur_in_state -gt 0 && break
+ if (( $loop >= ${#delays[*]} )) ; then
+ ceph pg dump pgs
+ return 1
+ fi
+ sleep ${delays[$loop]}
+ loop+=1
+ done
+ return 0
+}
+
+
+function wait_for_recovery_toofull() {
+ local timeout=$1
+ wait_for_state recovery_toofull $timeout
+}
+
+
+# Create 1 pool with size 1
+# Set the full-ratio to 50%
+# Write 600 objects of 5K each (3000K)
+# Inject fake_statfs_for_testing of 3600K (83% full)
+# Increase the pool size to 2
+# The pool shouldn't have room to recover
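+#
+# Rough arithmetic: 600 x 5K = 3000K of data; fake_statfs_for_testing=3686400
+# bytes is 3600K, so one copy already uses ~83% of the "disk", well past the
+# 50% full-ratio, leaving no room for a second copy once size goes to 2.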
+function TEST_recovery_test_simple() {
+ local dir=$1
+ local pools=1
+ local OSDS=2
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ export CEPH_ARGS
+
+ for osd in $(seq 0 $(expr $OSDS - 1))
+ do
+ run_osd $dir $osd || return 1
+ done
+
+ ceph osd set-nearfull-ratio .40
+ ceph osd set-backfillfull-ratio .45
+ ceph osd set-full-ratio .50
+
+ for p in $(seq 1 $pools)
+ do
+ create_pool "${poolprefix}$p" 1 1
+ ceph osd pool set "${poolprefix}$p" size 1
+ done
+
+ wait_for_clean || return 1
+
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=5
+ for o in $(seq 1 $objects)
+ do
+ rados -p "${poolprefix}$p" put obj$o $dir/datafile
+ done
+
+ for o in $(seq 0 $(expr $OSDS - 1))
+ do
+ ceph tell osd.$o injectargs '--fake_statfs_for_testing 3686400' || return 1
+ done
+ sleep 5
+
+ ceph pg dump pgs
+
+ for p in $(seq 1 $pools)
+ do
+ ceph osd pool set "${poolprefix}$p" size 2
+ done
+
+    # If this times out, we'll detect the errors below
+ wait_for_recovery_toofull 30
+
+ ERRORS=0
+ if [ "$(ceph pg dump pgs | grep +recovery_toofull | wc -l)" != "1" ];
+ then
+ echo "One pool should have been in recovery_toofull"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+
+ ceph pg dump pgs
+ ceph status
+ ceph status --format=json-pretty > $dir/stat.json
+
+ eval SEV=$(jq '.health.checks.PG_RECOVERY_FULL.severity' $dir/stat.json)
+ if [ "$SEV" != "HEALTH_ERR" ]; then
+ echo "PG_RECOVERY_FULL severity $SEV not HEALTH_ERR"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+ eval MSG=$(jq '.health.checks.PG_RECOVERY_FULL.summary.message' $dir/stat.json)
+ if [ "$MSG" != "Full OSDs blocking recovery: 1 pg recovery_toofull" ]; then
+ echo "PG_RECOVERY_FULL message '$MSG' mismatched"
+ ERRORS="$(expr $ERRORS + 1)"
+ fi
+ rm -f $dir/stat.json
+
+ if [ $ERRORS != "0" ];
+ then
+ return 1
+ fi
+
+ for i in $(seq 1 $pools)
+ do
+ delete_pool "${poolprefix}$i"
+ done
+ kill_daemons $dir || return 1
+}
+
+
+main osd-recovery-space "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-space.sh"
+# End:
diff --git a/qa/standalone/osd/osd-recovery-stats.sh b/qa/standalone/osd/osd-recovery-stats.sh
new file mode 100755
index 00000000..04a28794
--- /dev/null
+++ b/qa/standalone/osd/osd-recovery-stats.sh
@@ -0,0 +1,512 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ # Fix port????
+ export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+ # so we will not force auth_log_shard to be acting_primary
+ CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 "
+ export margin=10
+ export objects=200
+ export poolname=test
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function below_margin() {
+ local -i check=$1
+ shift
+ local -i target=$1
+
+ return $(( $check <= $target && $check >= $target - $margin ? 0 : 1 ))
+}
+
+function above_margin() {
+ local -i check=$1
+ shift
+ local -i target=$1
+
+ return $(( $check >= $target && $check <= $target + $margin ? 0 : 1 ))
+}
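+
+# Worked examples with the default margin=10:
+#   below_margin 195 200 -> 0 (195 is within [190, 200])
+#   below_margin 205 200 -> 1 (205 is above the target)
+#   above_margin 205 200 -> 0 (205 is within [200, 210])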
+
+FIND_UPACT='grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"'
+FIND_FIRST='grep "pg[[]${PG}.*recovering.*_update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"'
+FIND_LAST='grep "pg[[]${PG}.*recovering.*_update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"'
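+
+# These snippets are eval'd by check() below with $PG, $log, $which, $UPACT and
+# $addp in scope. Roughly: for a (hypothetical) log line such as
+#   ... pg[1.0( ... ) [1,2,4] ... recovering ... _update_calc_stats degraded 500
+# FIND_UPACT pulls out the up/acting vector ("[1,2,4]"), while FIND_FIRST and
+# FIND_LAST pull the trailing counter (here 500) from the first/last matching line.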
+
+function check() {
+ local dir=$1
+ local PG=$2
+ local primary=$3
+ local type=$4
+ local degraded_start=$5
+ local degraded_end=$6
+ local misplaced_start=$7
+ local misplaced_end=$8
+ local primary_start=${9:-}
+ local primary_end=${10:-}
+
+ local log=$dir/osd.${primary}.log
+
+ local addp=" "
+ if [ "$type" = "erasure" ];
+ then
+ addp="p"
+ fi
+
+ UPACT=$(eval $FIND_UPACT)
+
+ # Check 3rd line at start because of false recovery starts
+ local which="degraded"
+ FIRST=$(eval $FIND_FIRST)
+ below_margin $FIRST $degraded_start || return 1
+ LAST=$(eval $FIND_LAST)
+ above_margin $LAST $degraded_end || return 1
+
+ # Check 3rd line at start because of false recovery starts
+ which="misplaced"
+ FIRST=$(eval $FIND_FIRST)
+ below_margin $FIRST $misplaced_start || return 1
+ LAST=$(eval $FIND_LAST)
+ above_margin $LAST $misplaced_end || return 1
+
+ # This is the value of set into MISSING_ON_PRIMARY
+ if [ -n "$primary_start" ];
+ then
+ which="shard $primary"
+ FIRST=$(eval $FIND_FIRST)
+ below_margin $FIRST $primary_start || return 1
+ LAST=$(eval $FIND_LAST)
+ above_margin $LAST $primary_end || return 1
+ fi
+}
+
+# [1,0,?] -> [1,2,4]
+# degraded 500 -> 0
+# active+recovering+degraded
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 0 500 0 0 0 500 500 active+recovering+degraded 2017-11-17 19:27:36.493828 28'500 32:603 [1,2,4] 1 [1,2,4] 1 0'0 2017-11-17 19:27:05.915467 0'0 2017-11-17 19:27:05.915467
+function do_recovery_out1() {
+ local dir=$1
+ shift
+ local type=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ if [ $type = "erasure" ];
+ then
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
+ create_pool $poolname 1 1 $type myprofile
+ else
+ create_pool $poolname 1 1 $type
+ fi
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local primary=$(get_primary $poolname obj1)
+ local PG=$(get_pg $poolname obj1)
+    # Pick one osd that is not the primary
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set norecover
+ kill $(cat $dir/osd.${otherosd}.pid)
+ ceph osd down osd.${otherosd}
+ ceph osd out osd.${otherosd}
+ ceph osd unset norecover
+ ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ check $dir $PG $primary $type $objects 0 0 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+function TEST_recovery_replicated_out1() {
+ local dir=$1
+
+ do_recovery_out1 $dir replicated || return 1
+}
+
+function TEST_recovery_erasure_out1() {
+ local dir=$1
+
+ do_recovery_out1 $dir erasure || return 1
+}
+
+# [0, 1] -> [2,3,4,5]
+# degraded 1000 -> 0
+# misplaced 1000 -> 0
+# missing on primary 500 -> 0
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 500 1000 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:38:37.453438 22'500 25:394 [2,4,3,5] 2 [2,4,3,5] 2 0'0 2017-10-27 09:37:58.046748 0'0 2017-10-27 09:37:58.046748
+function TEST_recovery_sizeup() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 2
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local primary=$(get_primary $poolname obj1)
+ local PG=$(get_pg $poolname obj1)
+ # Only 2 OSDs so only 1 not primary
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set norecover
+ ceph osd out osd.$primary osd.$otherosd
+ ceph osd pool set test size 4
+ ceph osd unset norecover
+ # Get new primary
+ primary=$(get_primary $poolname obj1)
+
+ ceph tell osd.${primary} debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local degraded=$(expr $objects \* 2)
+ local misplaced=$(expr $objects \* 2)
+ local log=$dir/osd.${primary}.log
+ check $dir $PG $primary replicated $degraded 0 $misplaced 0 $objects 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# [0, 1, 2, 4] -> [3, 5]
+# misplaced 1000 -> 0
+# missing on primary 500 -> 0
+# active+recovering+degraded
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 500 500 0 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:34:50.012261 22'500 27:118 [3,5] 3 [3,5] 3 0'0 2017-10-27 09:34:08.617248 0'0 2017-10-27 09:34:08.617248
+function TEST_recovery_sizedown() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+ run_osd $dir 4 || return 1
+ run_osd $dir 5 || return 1
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 4
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local primary=$(get_primary $poolname obj1)
+ local PG=$(get_pg $poolname obj1)
+    # Get all osds in the up/acting set for this object
+ local allosds=$(get_osds $poolname obj1)
+
+ ceph osd set norecover
+ for osd in $allosds
+ do
+ ceph osd out osd.$osd
+ done
+
+ ceph osd pool set test size 2
+ ceph osd unset norecover
+ ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ # Get new primary
+ primary=$(get_primary $poolname obj1)
+
+ local misplaced=$(expr $objects \* 2)
+ local log=$dir/osd.${primary}.log
+ check $dir $PG $primary replicated 0 0 $misplaced 0 || return 1
+
+ UPACT=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/")
+
+ # This is the value of set into MISSING_ON_PRIMARY
+ FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats shard $primary " $log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/")
+ below_margin $FIRST $objects || return 1
+ LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats shard $primary " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/")
+ above_margin $LAST 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# [1] -> [1,2]
+# degraded 300 -> 200
+# active+recovering+undersized+degraded
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 100 0 300 0 0 0 100 100 active+recovering+undersized+degraded 2017-11-17 17:16:15.302943 13'500 16:643 [1,2] 1 [1,2] 1 0'0 2017-11-17 17:15:34.985563 0'0 2017-11-17 17:15:34.985563
+function TEST_recovery_undersized() {
+ local dir=$1
+
+ local osds=3
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ for i in $(seq 0 $(expr $osds - 1))
+ do
+ run_osd $dir $i || return 1
+ done
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 1
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local primary=$(get_primary $poolname obj1)
+ local PG=$(get_pg $poolname obj1)
+
+ ceph osd set norecover
+    # Mark one osd that is not the primary out (size 1, so it holds no replica)
+ for i in $(seq 0 $(expr $osds - 1))
+ do
+ if [ $i = $primary ];
+ then
+ continue
+ fi
+ ceph osd out osd.$i
+ break
+ done
+ ceph osd pool set test size 4
+ ceph osd unset norecover
+ ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
+ # Give extra sleep time because code below doesn't have the sophistication of wait_for_clean()
+ sleep 10
+ flush_pg_stats || return 1
+
+ # Wait for recovery to finish
+ # Can't use wait_for_clean() because state goes from active+recovering+undersized+degraded
+ # to active+undersized+degraded
+ for i in $(seq 1 60)
+ do
+ if ceph pg dump pgs | grep ^$PG | grep -qv recovering
+ then
+ break
+ fi
+ if [ $i = "60" ];
+ then
+ echo "Timeout waiting for recovery to finish"
+ return 1
+ fi
+ sleep 1
+ done
+
+ # Get new primary
+ primary=$(get_primary $poolname obj1)
+ local log=$dir/osd.${primary}.log
+
+ local first_degraded=$(expr $objects \* 3)
+ local last_degraded=$(expr $objects \* 2)
+ check $dir $PG $primary replicated $first_degraded $last_degraded 0 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+# [1,0,2] -> [1,3,NONE]/[1,3,2]
+# degraded 100 -> 0
+# misplaced 100 -> 100
+# active+recovering+degraded+remapped
+
+# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP
+# 1.0 100 0 100 100 0 0 100 100 active+recovering+degraded+remapped 2017-11-27 21:24:20.851243 18'500 23:618 [1,3,NONE] 1 [1,3,2] 1 0'0 2017-11-27 21:23:39.395242 0'0 2017-11-27 21:23:39.395242
+function TEST_recovery_erasure_remapped() {
+ local dir=$1
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+ run_osd $dir 3 || return 1
+
+ ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd
+ create_pool $poolname 1 1 erasure myprofile
+ ceph osd pool set $poolname min_size 2
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local primary=$(get_primary $poolname obj1)
+ local PG=$(get_pg $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set norecover
+ kill $(cat $dir/osd.${otherosd}.pid)
+ ceph osd down osd.${otherosd}
+ ceph osd out osd.${otherosd}
+
+    # Mark one osd that is neither the primary nor the already down/out osd as out
+ for i in 0 1 2 3
+ do
+ if [ $i = $primary ];
+ then
+ continue
+ fi
+ if [ $i = $otherosd ];
+ then
+ continue
+ fi
+ ceph osd out osd.$i
+ break
+ done
+ ceph osd unset norecover
+ ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ local log=$dir/osd.${primary}.log
+ check $dir $PG $primary erasure $objects 0 $objects $objects || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+function TEST_recovery_multi() {
+ local dir=$1
+
+ local osds=6
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ for i in $(seq 0 $(expr $osds - 1))
+ do
+ run_osd $dir $i || return 1
+ done
+
+ create_pool $poolname 1 1
+ ceph osd pool set $poolname size 3
+ ceph osd pool set $poolname min_size 1
+
+ wait_for_clean || return 1
+
+ rados -p $poolname put obj1 /dev/null
+
+ local primary=$(get_primary $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj1)
+
+ ceph osd set noout
+ ceph osd set norecover
+ kill $(cat $dir/osd.${otherosd}.pid)
+ ceph osd down osd.${otherosd}
+
+ local half=$(expr $objects / 2)
+ for i in $(seq 2 $half)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ kill $(cat $dir/osd.${primary}.pid)
+ ceph osd down osd.${primary}
+ activate_osd $dir ${otherosd}
+ sleep 3
+
+ for i in $(seq $(expr $half + 1) $objects)
+ do
+ rados -p $poolname put obj$i /dev/null
+ done
+
+ local PG=$(get_pg $poolname obj1)
+ local otherosd=$(get_not_primary $poolname obj$objects)
+
+ ceph osd unset noout
+ ceph osd out osd.$primary osd.$otherosd
+ activate_osd $dir ${primary}
+ sleep 3
+
+ ceph osd pool set test size 4
+ ceph osd unset norecover
+ ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0
+ sleep 2
+
+ wait_for_clean || return 1
+
+ # Get new primary
+ primary=$(get_primary $poolname obj1)
+
+ local log=$dir/osd.${primary}.log
+ check $dir $PG $primary replicated 399 0 300 0 99 0 || return 1
+
+ delete_pool $poolname
+ kill_daemons $dir || return 1
+}
+
+main osd-recovery-stats "$@"
+
+# Local Variables:
+# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-stats.sh"
+# End:
diff --git a/qa/standalone/osd/osd-rep-recov-eio.sh b/qa/standalone/osd/osd-rep-recov-eio.sh
new file mode 100755
index 00000000..332a61ac
--- /dev/null
+++ b/qa/standalone/osd/osd-rep-recov-eio.sh
@@ -0,0 +1,476 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+#
+# Author: Kefu Chai <kchai@redhat.com>
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+warnings=10
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7140" # git grep '\<7140\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ # set warning amount in case default changes
+ run_mon $dir a --mon_osd_warn_num_repaired=$warnings || return 1
+ run_mgr $dir x || return 1
+ ceph osd pool create foo 8 || return 1
+
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+function setup_osds() {
+ local count=$1
+ shift
+ local type=$1
+
+ for id in $(seq 0 $(expr $count - 1)) ; do
+ run_osd${type} $dir $id || return 1
+ done
+ wait_for_clean || return 1
+}
+
+function get_state() {
+ local pgid=$1
+ local sname=state
+ ceph --format json pg dump pgs 2>/dev/null | \
+ jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname"
+}
+
+function rados_put() {
+ local dir=$1
+ local poolname=$2
+ local objname=${3:-SOMETHING}
+
+ for marker in AAA BBB CCCC DDDD ; do
+ printf "%*s" 1024 $marker
+ done > $dir/ORIGINAL
+    #
+    # put an object; rados_get() later fetches it and compares with $dir/ORIGINAL
+    #
+ rados --pool $poolname put $objname $dir/ORIGINAL || return 1
+}
+
+function rados_get() {
+ local dir=$1
+ local poolname=$2
+ local objname=${3:-SOMETHING}
+ local expect=${4:-ok}
+
+ #
+ # Expect a failure to get object
+ #
+ if [ $expect = "fail" ];
+ then
+ ! rados --pool $poolname get $objname $dir/COPY
+ return
+ fi
+ #
+ # Expect hang trying to get object
+ #
+ if [ $expect = "hang" ];
+ then
+ timeout 5 rados --pool $poolname get $objname $dir/COPY
+ test "$?" = "124"
+ return
+ fi
+ #
+ # get an object, compare with $dir/ORIGINAL
+ #
+ rados --pool $poolname get $objname $dir/COPY || return 1
+ diff $dir/ORIGINAL $dir/COPY || return 1
+ rm $dir/COPY
+}
+
+function rados_get_data() {
+ local inject=$1
+ shift
+ local dir=$1
+
+ local poolname=pool-rep
+ local objname=obj-$inject-$$
+ local pgid=$(get_pg $poolname $objname)
+
+ rados_put $dir $poolname $objname || return 1
+ inject_$inject rep data $poolname $objname $dir 0 || return 1
+ rados_get $dir $poolname $objname || return 1
+
+ wait_for_clean
+ COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
+ test "$COUNT" = "1" || return 1
+ flush_pg_stats
+ COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
+ test "$COUNT" = "1" || return 1
+
+ local object_osds=($(get_osds $poolname $objname))
+ local primary=${object_osds[0]}
+ local bad_peer=${object_osds[1]}
+ inject_$inject rep data $poolname $objname $dir 0 || return 1
+ inject_$inject rep data $poolname $objname $dir 1 || return 1
+ # Force primary to pull from the bad peer, so we can repair it too!
+ set_config osd $primary osd_debug_feed_pullee $bad_peer || return 1
+ rados_get $dir $poolname $objname || return 1
+
+ # Wait until automatic repair of bad peer is done
+ wait_for_clean || return 1
+
+ inject_$inject rep data $poolname $objname $dir 0 || return 1
+ inject_$inject rep data $poolname $objname $dir 2 || return 1
+ rados_get $dir $poolname $objname || return 1
+
+ wait_for_clean
+ COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
+ test "$COUNT" = "3" || return 1
+ flush_pg_stats
+ COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
+ test "$COUNT" = "4" || return 1
+
+ inject_$inject rep data $poolname $objname $dir 0 || return 1
+ inject_$inject rep data $poolname $objname $dir 1 || return 1
+ inject_$inject rep data $poolname $objname $dir 2 || return 1
+ rados_get $dir $poolname $objname hang || return 1
+
+ wait_for_clean
+    # After the hang no further repair could happen, so the counts stay the same
+ COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
+ test "$COUNT" = "3" || return 1
+ flush_pg_stats
+ COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
+ test "$COUNT" = "4" || return 1
+}
+
+function TEST_rados_get_with_eio() {
+ local dir=$1
+
+ setup_osds 4 || return 1
+
+ local poolname=pool-rep
+ create_pool $poolname 1 1 || return 1
+ wait_for_clean || return 1
+ rados_get_data eio $dir || return 1
+
+ delete_pool $poolname
+}
+
+function TEST_rados_repair_warning() {
+ local dir=$1
+ local OBJS=$(expr $warnings + 1)
+
+ setup_osds 4 || return 1
+
+ local poolname=pool-rep
+ create_pool $poolname 1 1 || return 1
+ wait_for_clean || return 1
+
+ local objbase=obj-warn
+ local inject=eio
+
+ for i in $(seq 1 $OBJS)
+ do
+ rados_put $dir $poolname ${objbase}-$i || return 1
+ inject_$inject rep data $poolname ${objbase}-$i $dir 0 || return 1
+ rados_get $dir $poolname ${objbase}-$i || return 1
+ done
+ local pgid=$(get_pg $poolname ${objbase}-1)
+
+ local object_osds=($(get_osds $poolname ${objbase}-1))
+ local primary=${object_osds[0]}
+ local bad_peer=${object_osds[1]}
+
+ wait_for_clean
+ COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
+ test "$COUNT" = "$OBJS" || return 1
+ flush_pg_stats
+ COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
+ test "$COUNT" = "$OBJS" || return 1
+
+ ceph health | grep -q "Too many repaired reads on 1 OSDs" || return 1
+ ceph health detail | grep -q "osd.$primary had $OBJS reads repaired" || return 1
+
+ ceph tell osd.$primary clear_shards_repaired
+ sleep 10
+
+ set -o pipefail
+    # Clearing the repair count should have muted the warning; with pipefail,
+    # the negated grep makes the pipeline fail if the message is still present
+ ceph health | $(! grep -q "Too many repaired reads on 1 OSDs") || return 1
+ set +o pipefail
+
+ ceph tell osd.$primary clear_shards_repaired $OBJS
+ sleep 10
+
+ for i in $(seq 1 $OBJS)
+ do
+ inject_$inject rep data $poolname ${objbase}-$i $dir 0 || return 1
+ inject_$inject rep data $poolname ${objbase}-$i $dir 1 || return 1
+ # Force primary to pull from the bad peer, so we can repair it too!
+ set_config osd $primary osd_debug_feed_pullee $bad_peer || return 1
+ rados_get $dir $poolname ${objbase}-$i || return 1
+ done
+
+ wait_for_clean
+ COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired')
+ test "$COUNT" = "$(expr $OBJS \* 2)" || return 1
+ flush_pg_stats
+ COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired")
+ test "$COUNT" = "$(expr $OBJS \* 3)" || return 1
+
+    # Give the mon a chance to notice the additional OSD and the reset
+    # num_shards_repaired value (the default tick time is 5 seconds)
+ CHECKTIME=10
+ LOOPS=0
+    while true
+ do
+ sleep 1
+ if ceph health | grep -q "Too many repaired reads on 2 OSDs"
+ then
+ break
+ fi
+ LOOPS=$(expr $LOOPS + 1)
+ if test "$LOOPS" = "$CHECKTIME"
+ then
+ echo "Too many repaired reads not seen after $CHECKTIME seconds"
+ return 1
+ fi
+ done
+ ceph health detail | grep -q "osd.$primary had $(expr $OBJS \* 2) reads repaired" || return 1
+ ceph health detail | grep -q "osd.$bad_peer had $OBJS reads repaired" || return 1
+
+ delete_pool $poolname
+}
+
+# Test backfill with unfound object
+function TEST_rep_backfill_unfound() {
+ local dir=$1
+ local objname=myobject
+ local lastobj=300
+ # Must be between 1 and $lastobj
+ local testobj=obj250
+
+ export CEPH_ARGS
+ CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10'
+ setup_osds 3 || return 1
+
+ local poolname=test-pool
+ create_pool $poolname 1 1 || return 1
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ rados_put $dir $poolname $objname || return 1
+
+ local -a initial_osds=($(get_osds $poolname $objname))
+ local last_osd=${initial_osds[-1]}
+ kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1
+ ceph osd down ${last_osd} || return 1
+ ceph osd out ${last_osd} || return 1
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
+ for i in $(seq 1 $lastobj)
+ do
+ rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
+ done
+
+ inject_eio rep data $poolname $testobj $dir 0 || return 1
+ inject_eio rep data $poolname $testobj $dir 1 || return 1
+
+ activate_osd $dir ${last_osd} || return 1
+ ceph osd in ${last_osd} || return 1
+
+ sleep 15
+
+ for tmp in $(seq 1 100); do
+ state=$(get_state 2.0)
+ echo $state | grep backfill_unfound
+ if [ "$?" = "0" ]; then
+ break
+ fi
+ echo "$state "
+ sleep 1
+ done
+
+ ceph pg dump pgs
+ ceph pg 2.0 list_unfound | grep -q $testobj || return 1
+
+ # Command should hang because object is unfound
+ timeout 5 rados -p $poolname get $testobj $dir/CHECK
+ test $? = "124" || return 1
+
+ ceph pg 2.0 mark_unfound_lost delete
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $lastobj)
+ do
+ if [ obj${i} = "$testobj" ]; then
+ # Doesn't exist anymore
+ ! rados -p $poolname get $testobj $dir/CHECK || return 1
+ else
+ rados --pool $poolname get obj${i} $dir/CHECK || return 1
+ diff -q $dir/ORIGINAL $dir/CHECK || return 1
+ fi
+ done
+
+ rm -f ${dir}/ORIGINAL ${dir}/CHECK
+
+ delete_pool $poolname
+}
+
+# Test recovery with unfound object
+function TEST_rep_recovery_unfound() {
+ local dir=$1
+ local objname=myobject
+ local lastobj=100
+ # Must be between 1 and $lastobj
+ local testobj=obj75
+
+ setup_osds 3 || return 1
+
+ local poolname=test-pool
+ create_pool $poolname 1 1 || return 1
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ rados_put $dir $poolname $objname || return 1
+
+ local -a initial_osds=($(get_osds $poolname $objname))
+ local last_osd=${initial_osds[-1]}
+ kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1
+ ceph osd down ${last_osd} || return 1
+ ceph osd out ${last_osd} || return 1
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4
+ for i in $(seq 1 $lastobj)
+ do
+ rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1
+ done
+
+ inject_eio rep data $poolname $testobj $dir 0 || return 1
+ inject_eio rep data $poolname $testobj $dir 1 || return 1
+
+ activate_osd $dir ${last_osd} || return 1
+ ceph osd in ${last_osd} || return 1
+
+ sleep 15
+
+ for tmp in $(seq 1 100); do
+ state=$(get_state 2.0)
+ echo $state | grep -v recovering
+ if [ "$?" = "0" ]; then
+ break
+ fi
+ echo "$state "
+ sleep 1
+ done
+
+ ceph pg dump pgs
+ ceph pg 2.0 list_unfound | grep -q $testobj || return 1
+
+ # Command should hang because object is unfound
+ timeout 5 rados -p $poolname get $testobj $dir/CHECK
+ test $? = "124" || return 1
+
+ ceph pg 2.0 mark_unfound_lost delete
+
+ wait_for_clean || return 1
+
+ for i in $(seq 1 $lastobj)
+ do
+ if [ obj${i} = "$testobj" ]; then
+ # Doesn't exist anymore
+ ! rados -p $poolname get $testobj $dir/CHECK || return 1
+ else
+ rados --pool $poolname get obj${i} $dir/CHECK || return 1
+ diff -q $dir/ORIGINAL $dir/CHECK || return 1
+ fi
+ done
+
+ rm -f ${dir}/ORIGINAL ${dir}/CHECK
+
+ delete_pool $poolname
+}
+
+# This is a filestore-only test because it requires the data digest in the object info
+function TEST_rep_read_unfound() {
+ local dir=$1
+ local objname=myobject
+
+ setup_osds 3 _filestore || return 1
+
+ ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1
+ local poolname=test-pool
+ create_pool $poolname 1 1 || return 1
+ ceph osd pool set $poolname size 2
+ wait_for_clean || return 1
+
+ ceph pg dump pgs
+
+ dd if=/dev/urandom bs=8k count=1 of=$dir/ORIGINAL
+ rados -p $poolname put $objname $dir/ORIGINAL
+
+ local primary=$(get_primary $poolname $objname)
+ local other=$(get_not_primary $poolname $objname)
+
+ dd if=/dev/urandom bs=8k count=1 of=$dir/CORRUPT
+ objectstore_tool $dir $primary $objname set-bytes $dir/CORRUPT || return 1
+ objectstore_tool $dir $other $objname set-bytes $dir/CORRUPT || return 1
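+    # Both copies are now corrupt relative to the data digest recorded in the
+    # object info, so the read below is expected to stall with the PG in
+    # recovery_unfound until a good copy is restored.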
+
+ timeout 30 rados -p $poolname get $objname $dir/tmp &
+
+ sleep 5
+
+ flush_pg_stats
+ ceph --format=json pg dump pgs | jq '.'
+
+ if ! ceph --format=json pg dump pgs | jq '.pg_stats | .[0].state' | grep -q recovery_unfound
+ then
+ echo "Failure to get to recovery_unfound state"
+ return 1
+ fi
+
+ objectstore_tool $dir $other $objname set-bytes $dir/ORIGINAL || return 1
+
+ wait
+
+ if ! cmp $dir/ORIGINAL $dir/tmp
+ then
+ echo "Bad data after primary repair"
+ return 1
+ fi
+}
+
+main osd-rep-recov-eio.sh "$@"
+
+# Local Variables:
+# compile-command: "cd ../../../build ; make -j4 && ../qa/run-standalone.sh osd-rep-recov-eio.sh"
+# End:
diff --git a/qa/standalone/osd/osd-reuse-id.sh b/qa/standalone/osd/osd-reuse-id.sh
new file mode 100755
index 00000000..9fd875d1
--- /dev/null
+++ b/qa/standalone/osd/osd-reuse-id.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2015 Red Hat <contact@redhat.com>
+#
+# Author: Loic Dachary <loic@dachary.org>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7123" # git grep '\<7123\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ $func $dir || return 1
+ done
+}
+
+function TEST_reuse_id() {
+ local dir=$1
+
+ setup $dir || return 1
+ run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ create_rbd_pool || return 1
+ wait_for_clean || return 1
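+    # destroying osd.1 frees its id; a freshly created osd should be able to
+    # reuse id 1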
+ destroy_osd $dir 1 || return 1
+ run_osd $dir 1 || return 1
+}
+
+main osd-reuse-id "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/osd-reuse-id.sh"
+# End:
diff --git a/qa/standalone/osd/pg-split-merge.sh b/qa/standalone/osd/pg-split-merge.sh
new file mode 100755
index 00000000..ad697a9f
--- /dev/null
+++ b/qa/standalone/osd/pg-split-merge.sh
@@ -0,0 +1,204 @@
+#!/usr/bin/env bash
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7147" # git grep '\<7147\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON --mon_min_osdmap_epochs=50 --paxos_service_trim_min=10"
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ $func $dir || return 1
+ done
+}
+
+function TEST_a_merge_empty() {
+ local dir=$1
+
+ setup $dir || return 1
+ run_mon $dir a --osd_pool_default_size=3 || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ ceph osd pool create foo 2 || return 1
+ ceph osd pool set foo pgp_num 1 || return 1
+
+ wait_for_clean || return 1
+
+    # note: 1.0 needs at least as many objects as 1.1
+ # 1.1
+ rados -p foo put foo1 /etc/passwd
+ rados -p foo put foo2 /etc/passwd
+ rados -p foo put foo3 /etc/passwd
+ rados -p foo put foo4 /etc/passwd
+ # 1.0
+ rados -p foo put foo5 /etc/passwd
+ rados -p foo put foo6 /etc/passwd
+ rados -p foo put foo8 /etc/passwd
+ rados -p foo put foo10 /etc/passwd
+ rados -p foo put foo11 /etc/passwd
+ rados -p foo put foo12 /etc/passwd
+ rados -p foo put foo16 /etc/passwd
+
+ wait_for_clean || return 1
+
+ ceph tell osd.1 config set osd_debug_no_purge_strays true
+ ceph osd pool set foo size 2 || return 1
+ wait_for_clean || return 1
+
+ kill_daemons $dir TERM osd.2 || return 1
+ ceph-objectstore-tool --data-path $dir/2 --op remove --pgid 1.1 --force || return 1
+ activate_osd $dir 2 || return 1
+
+ wait_for_clean || return 1
+
+ # osd.2: now 1.0 is there but 1.1 is not
+
+ # instantiate 1.1 on osd.2 with last_update=0'0 ('empty'), which is
+ # the problematic state... then let it merge with 1.0
+ ceph tell osd.2 config set osd_debug_no_acting_change true
+ ceph osd out 0 1
+ ceph osd pool set foo pg_num 1
+ sleep 5
+ ceph tell osd.2 config set osd_debug_no_acting_change false
+
+    # go back to osd.1 being primary, and to 3 replicas so the osd.2 copy
+    # doesn't get removed
+ ceph osd in 0 1
+ ceph osd pool set foo size 3
+
+ wait_for_clean || return 1
+
+    # scrub to ensure the osd.2 copy of 1.0 was treated as incomplete during
+    # the merge (as opposed to silently missing half of its objects).
+ ceph pg scrub 1.0
+ sleep 10
+ ceph log last debug
+ ceph pg ls
+ ceph pg ls | grep ' active.clean ' || return 1
+}
+
+function TEST_import_after_merge_and_gap() {
+ local dir=$1
+
+ setup $dir || return 1
+ run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ ceph osd pool create foo 2 || return 1
+ wait_for_clean || return 1
+ rados -p foo bench 3 write -b 1024 --no-cleanup || return 1
+
+ kill_daemons $dir TERM osd.0 || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.1 --file $dir/1.1 --force || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.0 --file $dir/1.0 --force || return 1
+ activate_osd $dir 0 || return 1
+
+ ceph osd pool set foo pg_num 1
+ sleep 5
+ while ceph daemon osd.0 perf dump | jq '.osd.numpg' | grep 2 ; do sleep 1 ; done
+ wait_for_clean || return 1
+
+ #
+ kill_daemons $dir TERM osd.0 || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
+    # this will import both halves of the original pg
+ ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
+ activate_osd $dir 0 || return 1
+
+ wait_for_clean || return 1
+
+ # make a map gap
+ for f in `seq 1 50` ; do
+ ceph osd set nodown
+ ceph osd unset nodown
+ done
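+    # Each set/unset pair publishes new osdmap epochs; once enough epochs have
+    # accumulated the mon can trim old maps (mon_min_osdmap_epochs=50 above),
+    # which is what opens the gap relative to the maps recorded in the exports.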
+
+    # poke and prod to ensure last_epoch_clean is high, gets reported to the
+    # mon, and the osd is able to trim old maps
+ rados -p foo bench 1 write -b 1024 --no-cleanup || return 1
+ wait_for_clean || return 1
+ ceph tell osd.0 send_beacon
+ sleep 5
+ ceph osd set nodown
+ ceph osd unset nodown
+ sleep 5
+
+ kill_daemons $dir TERM osd.0 || return 1
+
+ # this should fail.. 1.1 still doesn't exist
+ ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
+
+ ceph-objectstore-tool --data-path $dir/0 --op export-remove --pgid 1.0 --force --file $dir/1.0.later || return 1
+
+ # this should fail too because of the gap
+ ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
+ ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
+
+ # we can force it...
+ ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 --force || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 --force || return 1
+
+ # ...but the osd won't start, so remove it again.
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.1 --force || return 1
+
+ ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0.later --force || return 1
+
+
+ activate_osd $dir 0 || return 1
+
+ wait_for_clean || return 1
+}
+
+function TEST_import_after_split() {
+ local dir=$1
+
+ setup $dir || return 1
+ run_mon $dir a --osd_pool_default_size=1 || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+
+ ceph osd pool create foo 1 || return 1
+ wait_for_clean || return 1
+ rados -p foo bench 3 write -b 1024 --no-cleanup || return 1
+
+ kill_daemons $dir TERM osd.0 || return 1
+ ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.0 --file $dir/1.0 --force || return 1
+ activate_osd $dir 0 || return 1
+
+ ceph osd pool set foo pg_num 2
+ sleep 5
+ while ceph daemon osd.0 perf dump | jq '.osd.numpg' | grep 1 ; do sleep 1 ; done
+ wait_for_clean || return 1
+
+ kill_daemons $dir TERM osd.0 || return 1
+
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
+
+ # this should fail because 1.1 (split child) is there
+ ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
+
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.1 --force || return 1
+    # now it will work (1.1 is gone)
+ ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
+
+ activate_osd $dir 0 || return 1
+
+ wait_for_clean || return 1
+}
+
+
+main pg-split-merge "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && test/osd/pg-split-merge.sh"
+# End:
diff --git a/qa/standalone/osd/repro_long_log.sh b/qa/standalone/osd/repro_long_log.sh
new file mode 100755
index 00000000..97d572e5
--- /dev/null
+++ b/qa/standalone/osd/repro_long_log.sh
@@ -0,0 +1,152 @@
+#!/usr/bin/env bash
+#
+# Copyright (C) 2014 Cloudwatt <libre.licensing@cloudwatt.com>
+# Copyright (C) 2018 Red Hat <contact@redhat.com>
+#
+# Author: Josh Durgin <jdurgin@redhat.com>
+# Author: David Zafman <dzafman@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+function run() {
+ local dir=$1
+ shift
+
+ export CEPH_MON="127.0.0.1:7100" # git grep '\<7100\>' : there must be only one
+ export CEPH_ARGS
+ CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+ CEPH_ARGS+="--mon-host=$CEPH_MON "
+
+ local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+ for func in $funcs ; do
+ setup $dir || return 1
+ $func $dir || return 1
+ teardown $dir || return 1
+ done
+}
+
+PGID=
+
+function test_log_size()
+{
+ local PGID=$1
+ local EXPECTED=$2
+ ceph tell osd.\* flush_pg_stats
+ sleep 3
+ ceph pg $PGID query | jq .info.stats.log_size
+ ceph pg $PGID query | jq .info.stats.log_size | grep "${EXPECTED}"
+}
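+
+# Example (pg id and value are illustrative): "test_log_size 1.0 21" succeeds
+# only if pg 1.0 currently reports a log_size of 21.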
+
+function setup_log_test() {
+ local dir=$1
+ local which=$2
+
+ run_mon $dir a || return 1
+ run_mgr $dir x || return 1
+ run_osd $dir 0 || return 1
+ run_osd $dir 1 || return 1
+ run_osd $dir 2 || return 1
+
+ ceph osd pool create test 1 1 || true
+ POOL_ID=$(ceph osd dump --format json | jq '.pools[] | select(.pool_name == "test") | .pool')
+ PGID="${POOL_ID}.0"
+
+ ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 20 || return 1
+ ceph tell osd.\* injectargs -- --osd-max-pg-log-entries 30 || return 1
+ ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 10 || return 1
+ ceph tell osd.\* injectargs -- --osd-pg-log-dups-tracked 10 || return 1
+
+ touch $dir/foo
+ for i in $(seq 1 20)
+ do
+ rados -p test put foo $dir/foo || return 1
+ done
+
+ test_log_size $PGID 20 || return 1
+
+ rados -p test rm foo || return 1
+
+ # generate error entries
+ for i in $(seq 1 20)
+ do
+ rados -p test rm foo
+ done
+
+    # the log should have been trimmed down to osd-min-pg-log-entries (20)
+    # plus one extra entry
+ test_log_size $PGID 21 || return 1
+}
+
+function TEST_repro_long_log1()
+{
+ local dir=$1
+
+ setup_log_test $dir || return 1
+ # regular write should trim the log
+ rados -p test put foo $dir/foo || return 1
+ test_log_size $PGID 22 || return 1
+}
+
+function TEST_repro_long_log2()
+{
+ local dir=$1
+
+ setup_log_test $dir || return 1
+ local PRIMARY=$(ceph pg $PGID query | jq '.info.stats.up_primary')
+ kill_daemons $dir TERM osd.$PRIMARY || return 1
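+    # with the primary stopped, trim its pg log offline down to 2 entries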
+ CEPH_ARGS="--osd-max-pg-log-entries=2 --no-mon-config" ceph-objectstore-tool --data-path $dir/$PRIMARY --pgid $PGID --op trim-pg-log || return 1
+ activate_osd $dir $PRIMARY || return 1
+ wait_for_clean || return 1
+ test_log_size $PGID 2 || return 1
+}
+
+function TEST_trim_max_entries()
+{
+ local dir=$1
+
+ setup_log_test $dir || return 1
+
+ ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 1
+ ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 2
+ ceph tell osd.\* injectargs -- --osd-pg-log-trim-max 4
+
+ # adding log entries, should only trim 4 and add one each time
+ rados -p test rm foo
+ test_log_size $PGID 17
+ rados -p test rm foo
+ test_log_size $PGID 14
+ rados -p test rm foo
+ test_log_size $PGID 11
+ rados -p test rm foo
+ test_log_size $PGID 8
+ rados -p test rm foo
+ test_log_size $PGID 5
+ rados -p test rm foo
+ test_log_size $PGID 2
+
+ # below trim_min
+ rados -p test rm foo
+ test_log_size $PGID 3
+ rados -p test rm foo
+ test_log_size $PGID 3
+ rados -p test rm foo
+ test_log_size $PGID 3
+ rados -p test rm foo
+ test_log_size $PGID 3
+}
+
+main repro-long-log "$@"
+
+# Local Variables:
+# compile-command: "cd ../.. ; make -j4 && ../qa/run-standalone.sh repro_long_log.sh"
+# End: