commit e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree 64f88b554b444a49f656b6c656111a145cbbaa28 /qa/workunits/rbd
parent Initial commit. (diff)
author Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/workunits/rbd')
-rwxr-xr-x  qa/workunits/rbd/cli_generic.sh  1715
-rwxr-xr-x  qa/workunits/rbd/cli_migration.sh  357
-rwxr-xr-x  qa/workunits/rbd/concurrent.sh  375
-rwxr-xr-x  qa/workunits/rbd/crimson/test_crimson_librbd.sh  35
-rwxr-xr-x  qa/workunits/rbd/diff.sh  53
-rwxr-xr-x  qa/workunits/rbd/diff_continuous.sh  106
-rwxr-xr-x  qa/workunits/rbd/huge-tickets.sh  41
-rwxr-xr-x  qa/workunits/rbd/image_read.sh  680
-rwxr-xr-x  qa/workunits/rbd/import_export.sh  259
-rwxr-xr-x  qa/workunits/rbd/issue-20295.sh  18
-rwxr-xr-x  qa/workunits/rbd/journal.sh  326
-rwxr-xr-x  qa/workunits/rbd/kernel.sh  100
-rwxr-xr-x  qa/workunits/rbd/krbd_data_pool.sh  206
-rwxr-xr-x  qa/workunits/rbd/krbd_exclusive_option.sh  233
-rwxr-xr-x  qa/workunits/rbd/krbd_fallocate.sh  151
-rwxr-xr-x  qa/workunits/rbd/krbd_huge_osdmap.sh  51
-rwxr-xr-x  qa/workunits/rbd/krbd_latest_osdmap_on_map.sh  30
-rwxr-xr-x  qa/workunits/rbd/krbd_namespaces.sh  116
-rwxr-xr-x  qa/workunits/rbd/krbd_rxbounce.sh  103
-rwxr-xr-x  qa/workunits/rbd/krbd_stable_writes.sh  141
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_enumerate.sh  66
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_netlink_enobufs.sh  24
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_netns.sh  86
-rwxr-xr-x  qa/workunits/rbd/krbd_udev_symlinks.sh  116
-rwxr-xr-x  qa/workunits/rbd/krbd_wac.sh  40
-rwxr-xr-x  qa/workunits/rbd/krbd_watch_errors.sh  53
-rwxr-xr-x  qa/workunits/rbd/luks-encryption.sh  217
-rwxr-xr-x  qa/workunits/rbd/map-snapshot-io.sh  17
-rwxr-xr-x  qa/workunits/rbd/map-unmap.sh  45
-rwxr-xr-x  qa/workunits/rbd/merge_diff.sh  477
-rwxr-xr-x  qa/workunits/rbd/notify_master.sh  5
-rwxr-xr-x  qa/workunits/rbd/notify_slave.sh  5
-rwxr-xr-x  qa/workunits/rbd/permissions.sh  269
-rwxr-xr-x  qa/workunits/rbd/qemu-iotests.sh  47
-rwxr-xr-x  qa/workunits/rbd/qemu_dynamic_features.sh  46
-rwxr-xr-x  qa/workunits/rbd/qemu_rebuild_object_map.sh  37
-rwxr-xr-x  qa/workunits/rbd/qos.sh  90
-rwxr-xr-x  qa/workunits/rbd/rbd-ggate.sh  239
-rwxr-xr-x  qa/workunits/rbd/rbd-nbd.sh  500
-rwxr-xr-x  qa/workunits/rbd/rbd_groups.sh  258
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_bootstrap.sh  58
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_fsx_compare.sh  38
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_fsx_prepare.sh  10
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_ha.sh  210
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_helpers.sh  1488
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_journal.sh  614
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_snapshot.sh  517
-rwxr-xr-x  qa/workunits/rbd/rbd_mirror_stress.sh  221
-rwxr-xr-x  qa/workunits/rbd/rbd_support_module_recovery.sh  77
-rwxr-xr-x  qa/workunits/rbd/read-flags.sh  61
-rwxr-xr-x  qa/workunits/rbd/simple_big.sh  12
-rwxr-xr-x  qa/workunits/rbd/test_admin_socket.sh  151
-rwxr-xr-x  qa/workunits/rbd/test_librbd.sh  9
-rwxr-xr-x  qa/workunits/rbd/test_librbd_python.sh  12
-rwxr-xr-x  qa/workunits/rbd/test_lock_fence.sh  48
-rwxr-xr-x  qa/workunits/rbd/test_rbd_mirror.sh  9
-rwxr-xr-x  qa/workunits/rbd/test_rbd_tasks.sh  276
-rwxr-xr-x  qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh  34
-rwxr-xr-x  qa/workunits/rbd/verify_pool.sh  27
59 files changed, 11605 insertions(+), 0 deletions(-)
diff --git a/qa/workunits/rbd/cli_generic.sh b/qa/workunits/rbd/cli_generic.sh
new file mode 100755
index 000000000..57279d26d
--- /dev/null
+++ b/qa/workunits/rbd/cli_generic.sh
@@ -0,0 +1,1715 @@
+#!/usr/bin/env bash
+set -ex
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+export RBD_FORCE_ALLOW_V1=1
+
+# make sure the rbd pool is EMPTY... this is a test script!!
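+# (a non-zero 'rbd ls | wc -l' count makes 'grep -v ^0$' match, so the && chain echoes and aborts)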
+rbd ls | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
+
+IMGS="testimg1 testimg2 testimg3 testimg4 testimg5 testimg6 testimg-diff1 testimg-diff2 testimg-diff3 foo foo2 bar bar2 test1 test2 test3 test4 clone2"
+
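+# helper for negative tests: succeeds only if the wrapped command fails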
+expect_fail() {
+ "$@" && return 1 || return 0
+}
+
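+# detect a cache tier on the 'rbd' pool; 'rados ls' does not reliably list
+# objects on tiered pools, so some object-removal tests in test_remove are skipped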
+tiered=0
+if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
+ tiered=1
+fi
+
+remove_images() {
+ for img in $IMGS
+ do
+ (rbd snap purge $img || true) >/dev/null 2>&1
+ (rbd rm $img || true) >/dev/null 2>&1
+ done
+}
+
+test_others() {
+ echo "testing import, export, resize, and snapshots..."
+ TMP_FILES="/tmp/img1 /tmp/img1.new /tmp/img2 /tmp/img2.new /tmp/img3 /tmp/img3.new /tmp/img-diff1.new /tmp/img-diff2.new /tmp/img-diff3.new /tmp/img1.snap1 /tmp/img-diff1.snap1"
+
+ remove_images
+ rm -f $TMP_FILES
+
+ # create an image
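+ # (scatter chunks of small binaries at increasing offsets to produce a
+ # sparse file of roughly 100 MiB for import)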
+ dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
+ dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
+ dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
+ dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
+ dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
+
+ # import, snapshot
+ rbd import $RBD_CREATE_ARGS /tmp/img1 testimg1
+ rbd resize testimg1 --size=256 --allow-shrink
+ rbd export testimg1 /tmp/img2
+ rbd snap create testimg1 --snap=snap1
+ rbd resize testimg1 --size=128 && exit 1 || true # shrink should fail
+ rbd resize testimg1 --size=128 --allow-shrink
+ rbd export testimg1 /tmp/img3
+
+ # info
+ rbd info testimg1 | grep 'size 128 MiB'
+ rbd info --snap=snap1 testimg1 | grep 'size 256 MiB'
+
+ # export-diff
+ rm -rf /tmp/diff-testimg1-1 /tmp/diff-testimg1-2
+ rbd export-diff testimg1 --snap=snap1 /tmp/diff-testimg1-1
+ rbd export-diff testimg1 --from-snap=snap1 /tmp/diff-testimg1-2
+
+ # import-diff
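+ # applying both diffs to a fresh image reconstructs testimg1;
+ # --sparse-size sets the granularity used to detect and skip zeroed runs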
+ rbd create $RBD_CREATE_ARGS --size=1 testimg-diff1
+ rbd import-diff --sparse-size 8K /tmp/diff-testimg1-1 testimg-diff1
+ rbd import-diff --sparse-size 8K /tmp/diff-testimg1-2 testimg-diff1
+
+ # info
+ rbd info testimg1 | grep 'size 128 MiB'
+ rbd info --snap=snap1 testimg1 | grep 'size 256 MiB'
+ rbd info testimg-diff1 | grep 'size 128 MiB'
+ rbd info --snap=snap1 testimg-diff1 | grep 'size 256 MiB'
+
+ # make copies
+ rbd copy testimg1 --snap=snap1 testimg2
+ rbd copy testimg1 testimg3
+ rbd copy testimg-diff1 --sparse-size 768K --snap=snap1 testimg-diff2
+ rbd copy testimg-diff1 --sparse-size 768K testimg-diff3
+
+ # verify the result
+ rbd info testimg2 | grep 'size 256 MiB'
+ rbd info testimg3 | grep 'size 128 MiB'
+ rbd info testimg-diff2 | grep 'size 256 MiB'
+ rbd info testimg-diff3 | grep 'size 128 MiB'
+
+ # deep copies
+ rbd deep copy testimg1 testimg4
+ rbd deep copy testimg1 --snap=snap1 testimg5
+ rbd info testimg4 | grep 'size 128 MiB'
+ rbd info testimg5 | grep 'size 256 MiB'
+ rbd snap ls testimg4 | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls testimg4 | grep '.*snap1.*'
+
+ rbd export testimg1 /tmp/img1.new
+ rbd export testimg2 /tmp/img2.new
+ rbd export testimg3 /tmp/img3.new
+ rbd export testimg-diff1 /tmp/img-diff1.new
+ rbd export testimg-diff2 /tmp/img-diff2.new
+ rbd export testimg-diff3 /tmp/img-diff3.new
+
+ cmp /tmp/img2 /tmp/img2.new
+ cmp /tmp/img3 /tmp/img3.new
+ cmp /tmp/img2 /tmp/img-diff2.new
+ cmp /tmp/img3 /tmp/img-diff3.new
+
+ # rollback
+ rbd snap rollback --snap=snap1 testimg1
+ rbd snap rollback --snap=snap1 testimg-diff1
+ rbd info testimg1 | grep 'size 256 MiB'
+ rbd info testimg-diff1 | grep 'size 256 MiB'
+ rbd export testimg1 /tmp/img1.snap1
+ rbd export testimg-diff1 /tmp/img-diff1.snap1
+ cmp /tmp/img2 /tmp/img1.snap1
+ cmp /tmp/img2 /tmp/img-diff1.snap1
+
+ # test create, copy of zero-length images
+ rbd rm testimg2
+ rbd rm testimg3
+ rbd create testimg2 -s 0
+ rbd cp testimg2 testimg3
+ rbd deep cp testimg2 testimg6
+
+ # remove snapshots
+ rbd snap rm --snap=snap1 testimg1
+ rbd snap rm --snap=snap1 testimg-diff1
+ rbd info --snap=snap1 testimg1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
+ rbd info --snap=snap1 testimg-diff1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory'
+
+ # sparsify
+ rbd sparsify testimg1
+
+ remove_images
+ rm -f $TMP_FILES
+}
+
+test_rename() {
+ echo "testing rename..."
+ remove_images
+
+ rbd create --image-format 1 -s 1 foo
+ rbd create --image-format 2 -s 1 bar
+ rbd rename foo foo2
+ rbd rename foo2 bar 2>&1 | grep exists
+ rbd rename bar bar2
+ rbd rename bar2 foo2 2>&1 | grep exists
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd create -p rbd2 -s 1 foo
+ rbd rename rbd2/foo rbd2/bar
+ rbd -p rbd2 ls | grep bar
+ rbd rename rbd2/bar foo
+ rbd rename --pool rbd2 foo bar
+ ! rbd rename rbd2/bar --dest-pool rbd foo
+ rbd rename --pool rbd2 bar --dest-pool rbd2 foo
+ rbd -p rbd2 ls | grep foo
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+
+ remove_images
+}
+
+test_ls() {
+ echo "testing ls..."
+ remove_images
+
+ rbd create --image-format 1 -s 1 test1
+ rbd create --image-format 1 -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ # look for fields in output of ls -l without worrying about space
+ rbd ls -l | grep 'test1.*1 MiB.*1'
+ rbd ls -l | grep 'test2.*1 MiB.*1'
+
+ rbd rm test1
+ rbd rm test2
+
+ rbd create --image-format 2 -s 1 test1
+ rbd create --image-format 2 -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ rbd ls -l | grep 'test1.*1 MiB.*2'
+ rbd ls -l | grep 'test2.*1 MiB.*2'
+
+ rbd rm test1
+ rbd rm test2
+
+ rbd create --image-format 2 -s 1 test1
+ rbd create --image-format 1 -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ rbd ls -l | grep 'test1.*1 MiB.*2'
+ rbd ls -l | grep 'test2.*1 MiB.*1'
+ remove_images
+
+ # test that many images can be shown by ls
+ for i in $(seq -w 00 99); do
+ rbd create image.$i -s 1
+ done
+ rbd ls | wc -l | grep 100
+ rbd ls -l | grep image | wc -l | grep 100
+ for i in $(seq -w 00 99); do
+ rbd rm image.$i
+ done
+
+ for i in $(seq -w 00 99); do
+ rbd create image.$i --image-format 2 -s 1
+ done
+ rbd ls | wc -l | grep 100
+ rbd ls -l | grep image | wc -l | grep 100
+ for i in $(seq -w 00 99); do
+ rbd rm image.$i
+ done
+}
+
+test_remove() {
+ echo "testing remove..."
+ remove_images
+
+ rbd remove "NOT_EXIST" && exit 1 || true # remove should fail
+ rbd create --image-format 1 -s 1 test1
+ rbd rm test1
+ rbd ls | wc -l | grep "^0$"
+
+ rbd create --image-format 2 -s 1 test2
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+
+ # check that remove succeeds even if it's
+ # interrupted partway through. simulate this
+ # by removing some objects manually.
+
+ # remove with header missing (old format)
+ rbd create --image-format 1 -s 1 test1
+ rados rm -p rbd test1.rbd
+ rbd rm test1
+ rbd ls | wc -l | grep "^0$"
+
+ if [ $tiered -eq 0 ]; then
+ # remove with header missing
+ rbd create --image-format 2 -s 1 test2
+ HEADER=$(rados -p rbd ls | grep '^rbd_header')
+ rados -p rbd rm $HEADER
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+
+ # remove with id missing
+ rbd create --image-format 2 -s 1 test2
+ rados -p rbd rm rbd_id.test2
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+
+ # remove with header and id missing
+ rbd create --image-format 2 -s 1 test2
+ HEADER=$(rados -p rbd ls | grep '^rbd_header')
+ rados -p rbd rm $HEADER
+ rados -p rbd rm rbd_id.test2
+ rbd rm test2
+ rbd ls | wc -l | grep "^0$"
+ fi
+
+ # remove with rbd_children object missing (and, by extension,
+ # with child not mentioned in rbd_children)
+ rbd create --image-format 2 -s 1 test2
+ rbd snap create test2@snap
+ rbd snap protect test2@snap
+ rbd clone test2@snap clone --rbd-default-clone-format 1
+
+ rados -p rbd rm rbd_children
+ rbd rm clone
+ rbd ls | grep clone | wc -l | grep '^0$'
+
+ rbd snap unprotect test2@snap
+ rbd snap rm test2@snap
+ rbd rm test2
+}
+
+test_locking() {
+ echo "testing locking..."
+ remove_images
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd lock list test1 | wc -l | grep '^0$'
+ rbd lock add test1 id
+ rbd lock list test1 | grep ' 1 '
+ LOCKER=$(rbd lock list test1 | tail -n 1 | awk '{print $1;}')
+ rbd lock remove test1 id $LOCKER
+ rbd lock list test1 | wc -l | grep '^0$'
+
+ rbd lock add test1 id --shared tag
+ rbd lock list test1 | grep ' 1 '
+ rbd lock add test1 id --shared tag
+ rbd lock list test1 | grep ' 2 '
+ rbd lock add test1 id2 --shared tag
+ rbd lock list test1 | grep ' 3 '
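+ # 'rbd lock list' prints "<locker> <id>"; swap to "<id> <locker>" as expected by 'rbd lock remove'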
+ rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
+ if rbd info test1 | grep -qE "features:.*exclusive"
+ then
+ # new locking functionality requires all locks to be released
+ while [ -n "$(rbd lock list test1)" ]
+ do
+ rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1
+ done
+ fi
+ rbd rm test1
+}
+
+test_pool_image_args() {
+ echo "testing pool and image args..."
+ remove_images
+
+ ceph osd pool delete test test --yes-i-really-really-mean-it || true
+ ceph osd pool create test 32
+ rbd pool init test
+ truncate -s 1 /tmp/empty /tmp/empty@snap
+
+ rbd ls | wc -l | grep 0
+ rbd create -s 1 test1
+ rbd ls | grep -q test1
+ rbd import --image test2 /tmp/empty
+ rbd ls | grep -q test2
+ rbd --dest test3 import /tmp/empty
+ rbd ls | grep -q test3
+ rbd import /tmp/empty foo
+ rbd ls | grep -q foo
+
+ # should fail due to "destination snapname specified"
+ rbd import --dest test/empty@snap /tmp/empty && exit 1 || true
+ rbd import /tmp/empty test/empty@snap && exit 1 || true
+ rbd import --image test/empty@snap /tmp/empty && exit 1 || true
+ rbd import /tmp/empty@snap && exit 1 || true
+
+ rbd ls test | wc -l | grep 0
+ rbd import /tmp/empty test/test1
+ rbd ls test | grep -q test1
+ rbd -p test import /tmp/empty test2
+ rbd ls test | grep -q test2
+ rbd --image test3 -p test import /tmp/empty
+ rbd ls test | grep -q test3
+ rbd --image test4 -p test import /tmp/empty
+ rbd ls test | grep -q test4
+ rbd --dest test5 -p test import /tmp/empty
+ rbd ls test | grep -q test5
+ rbd --dest test6 --dest-pool test import /tmp/empty
+ rbd ls test | grep -q test6
+ rbd --image test7 --dest-pool test import /tmp/empty
+ rbd ls test | grep -q test7
+ rbd --image test/test8 import /tmp/empty
+ rbd ls test | grep -q test8
+ rbd --dest test/test9 import /tmp/empty
+ rbd ls test | grep -q test9
+ rbd import --pool test /tmp/empty
+ rbd ls test | grep -q empty
+
+ # copy with no explicit pool goes to pool rbd
+ rbd copy test/test9 test10
+ rbd ls test | grep -qv test10
+ rbd ls | grep -q test10
+ rbd copy test/test9 test/test10
+ rbd ls test | grep -q test10
+ rbd copy --pool test test10 --dest-pool test test11
+ rbd ls test | grep -q test11
+ rbd copy --dest-pool rbd --pool test test11 test12
+ rbd ls | grep test12
+ rbd ls test | grep -qv test12
+
+ rm -f /tmp/empty /tmp/empty@snap
+ ceph osd pool delete test test --yes-i-really-really-mean-it
+
+ for f in foo test1 test10 test12 test2 test3 ; do
+ rbd rm $f
+ done
+}
+
+test_clone() {
+ echo "testing clone..."
+ remove_images
+ rbd create test1 $RBD_CREATE_ARGS -s 1
+ rbd snap create test1@s1
+ rbd snap protect test1@s1
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd clone test1@s1 rbd2/clone
+ rbd -p rbd2 ls | grep clone
+ rbd -p rbd2 ls -l | grep clone | grep test1@s1
+ rbd ls | grep -v clone
+ rbd flatten rbd2/clone
+ rbd snap create rbd2/clone@s1
+ rbd snap protect rbd2/clone@s1
+ rbd clone rbd2/clone@s1 clone2
+ rbd ls | grep clone2
+ rbd ls -l | grep clone2 | grep rbd2/clone@s1
+ rbd -p rbd2 ls | grep -v clone2
+
+ rbd rm clone2
+ rbd snap unprotect rbd2/clone@s1
+ rbd snap rm rbd2/clone@s1
+ rbd rm rbd2/clone
+ rbd snap unprotect test1@s1
+ rbd snap rm test1@s1
+ rbd rm test1
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_trash() {
+ echo "testing trash..."
+ remove_images
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd create $RBD_CREATE_ARGS -s 1 test2
+ rbd ls | grep test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 2
+ rbd ls -l | grep 'test1.*2.*'
+ rbd ls -l | grep 'test2.*2.*'
+
+ rbd trash mv test1
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 1
+ rbd ls -l | grep 'test2.*2.*'
+
+ rbd trash ls | grep test1
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test1.*USER.*'
+ rbd trash ls -l | grep -v 'protected until'
+
+ ID=`rbd trash ls | cut -d ' ' -f 1`
+ rbd trash rm $ID
+
+ rbd trash mv test2
+ ID=`rbd trash ls | cut -d ' ' -f 1`
+ rbd info --image-id $ID | grep "rbd image 'test2'"
+
+ rbd trash restore $ID
+ rbd ls | grep test2
+ rbd ls | wc -l | grep 1
+ rbd ls -l | grep 'test2.*2.*'
+
+ rbd trash mv test2 --expires-at "3600 sec"
+ rbd trash ls | grep test2
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test2.*USER.*protected until'
+
+ rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
+ rbd trash rm --image-id $ID --force
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd snap create test1@snap1
+ rbd snap protect test1@snap1
+ rbd trash mv test1
+
+ rbd trash ls | grep test1
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test1.*USER.*'
+ rbd trash ls -l | grep -v 'protected until'
+
+ ID=`rbd trash ls | cut -d ' ' -f 1`
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls --image-id $ID | grep '.*snap1.*'
+
+ rbd snap unprotect --image-id $ID --snap snap1
+ rbd snap rm --image-id $ID --snap snap1
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
+
+ rbd trash restore $ID
+ rbd snap create test1@snap1
+ rbd snap create test1@snap2
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 2
+ rbd snap purge --image-id $ID
+ rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0
+
+ rbd rm --rbd_move_to_trash_on_remove=true --rbd_move_to_trash_on_remove_expire_seconds=3600 test1
+ rbd trash ls | grep test1
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls -l | grep 'test1.*USER.*protected until'
+ rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired'
+ rbd trash rm --image-id $ID --force
+
+ remove_images
+}
+
+test_purge() {
+ echo "testing trash purge..."
+ remove_images
+
+ rbd trash ls | wc -l | grep 0
+ rbd trash purge
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash ls | wc -l | grep 2
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd trash mv testimg1 --expires-at "1 hour"
+ rbd trash mv testimg2 --expires-at "3 hours"
+ rbd trash ls | wc -l | grep 2
+ rbd trash purge
+ rbd trash ls | wc -l | grep 2
+ rbd trash purge --expired-before "now + 2 hours"
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg2
+ rbd trash purge --expired-before "now + 4 hours"
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap # pin testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg1
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd snap create testimg2@snap # pin testimg2
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg2
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd create $RBD_CREATE_ARGS --size 256 testimg2
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg3@snap # pin testimg3
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg3
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ # test purging a clone with a chain of parents
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash mv testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 2
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg3@snap # pin testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd trash mv testimg1
+ rbd trash mv testimg2
+ rbd trash mv testimg3
+ rbd trash mv testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 5
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg3
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg3
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ # test purging a clone with a chain of auto-delete parents
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2
+ rbd trash mv testimg3
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 2
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 3
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+
+ rbd create $RBD_CREATE_ARGS --size 256 testimg1
+ rbd snap create testimg1@snap
+ rbd clone --rbd-default-clone-format=2 testimg1@snap testimg2
+ rbd snap rm testimg1@snap
+ rbd create $RBD_CREATE_ARGS --size 256 testimg3
+ rbd snap create testimg3@snap # pin testimg3
+ rbd snap create testimg2@snap
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg4
+ rbd clone --rbd-default-clone-format=2 testimg2@snap testimg5
+ rbd snap rm testimg2@snap
+ rbd snap create testimg4@snap
+ rbd clone --rbd-default-clone-format=2 testimg4@snap testimg6
+ rbd snap rm testimg4@snap
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg1
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg2
+ rbd trash mv testimg3
+ rbd rm --rbd_move_parent_to_trash_on_remove=true testimg4
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 4
+ rbd trash mv testimg6
+ rbd trash ls | wc -l | grep 5
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 3
+ rbd trash ls | grep testimg1
+ rbd trash ls | grep testimg2
+ rbd trash ls | grep testimg3
+ rbd trash mv testimg5
+ rbd trash ls | wc -l | grep 4
+ rbd trash purge 2>&1 | grep 'some expired images could not be removed'
+ rbd trash ls | wc -l | grep 1
+ rbd trash ls | grep testimg3
+ ID=$(rbd trash ls | awk '{ print $1 }')
+ rbd snap purge --image-id $ID
+ rbd trash purge
+ rbd trash ls | wc -l | grep 0
+}
+
+test_deep_copy_clone() {
+ echo "testing deep copy clone..."
+ remove_images
+
+ rbd create testimg1 $RBD_CREATE_ARGS --size 256
+ rbd snap create testimg1 --snap=snap1
+ rbd snap protect testimg1@snap1
+ rbd clone testimg1@snap1 testimg2
+ rbd snap create testimg2@snap2
+ rbd deep copy testimg2 testimg3
+ rbd info testimg3 | grep 'size 256 MiB'
+ rbd info testimg3 | grep 'parent: rbd/testimg1@snap1'
+ rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls testimg3 | grep '.*snap2.*'
+ rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2
+ rbd info testimg3 | grep 'features:.*deep-flatten' || rbd snap rm testimg3@snap2
+ rbd flatten testimg2
+ rbd flatten testimg3
+ rbd snap unprotect testimg1@snap1
+ rbd snap purge testimg2
+ rbd snap purge testimg3
+ rbd rm testimg2
+ rbd rm testimg3
+
+ rbd snap protect testimg1@snap1
+ rbd clone testimg1@snap1 testimg2
+ rbd snap create testimg2@snap2
+ rbd deep copy --flatten testimg2 testimg3
+ rbd info testimg3 | grep 'size 256 MiB'
+ rbd info testimg3 | grep -v 'parent:'
+ rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1
+ rbd snap ls testimg3 | grep '.*snap2.*'
+ rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2
+ rbd flatten testimg2
+ rbd snap unprotect testimg1@snap1
+
+ remove_images
+}
+
+test_clone_v2() {
+ echo "testing clone v2..."
+ remove_images
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd snap create test1@1
+ rbd clone --rbd-default-clone-format=1 test1@1 test2 && exit 1 || true
+ rbd clone --rbd-default-clone-format=2 test1@1 test2
+ rbd clone --rbd-default-clone-format=2 test1@1 test3
+
+ rbd snap protect test1@1
+ rbd clone --rbd-default-clone-format=1 test1@1 test4
+
+ rbd children test1@1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4"
+ rbd children --descendants test1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4"
+
+ rbd remove test4
+ rbd snap unprotect test1@1
+
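+ # with v2 clones attached, the removed snapshot is moved to the trash
+ # namespace (listed as "trash (<original name>)") instead of being deleted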
+ rbd snap remove test1@1
+ rbd snap list --all test1 | grep -E "trash \(1\) *$"
+
+ rbd snap create test1@2
+ rbd rm test1 2>&1 | grep 'image has snapshots'
+
+ rbd snap rm test1@2
+ rbd rm test1 2>&1 | grep 'linked clones'
+
+ rbd rm test3
+ rbd rm test1 2>&1 | grep 'linked clones'
+
+ rbd flatten test2
+ rbd snap list --all test1 | wc -l | grep '^0$'
+ rbd rm test1
+ rbd rm test2
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+ rbd snap create test1@1
+ rbd snap create test1@2
+ rbd clone test1@1 test2 --rbd-default-clone-format 2
+ rbd clone test1@2 test3 --rbd-default-clone-format 2
+ rbd snap rm test1@1
+ rbd snap rm test1@2
+ expect_fail rbd rm test1
+ rbd rm test1 --rbd-move-parent-to-trash-on-remove=true
+ rbd trash ls -a | grep test1
+ rbd rm test2
+ rbd trash ls -a | grep test1
+ rbd rm test3
+ rbd trash ls -a | expect_fail grep test1
+}
+
+test_thick_provision() {
+ echo "testing thick provision..."
+ remove_images
+
+ # Create small (64M) and large (4G) thick-provisioned images and
+ # check that their actual allocated size matches.
+
+ # Small thick-pro image test
+ rbd create $RBD_CREATE_ARGS --thick-provision -s 64M test1
+ count=0
+ ret=""
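+ # poll up to ~20s for 'rbd du' (fields 4-5 = USED) to report full allocation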
+ while [ $count -lt 10 ]
+ do
+ rbd du|grep test1|tr -s " "|cut -d " " -f 4-5|grep '^64 MiB' && ret=$?
+ if [ "$ret" = "0" ]
+ then
+ break;
+ fi
+ count=`expr $count + 1`
+ sleep 2
+ done
+ rbd du
+ if [ "$ret" != "0" ]
+ then
+ exit 1
+ fi
+ rbd rm test1
+ rbd ls | grep test1 | wc -l | grep '^0$'
+
+ # Large thick-pro image test
+ rbd create $RBD_CREATE_ARGS --thick-provision -s 4G test1
+ count=0
+ ret=""
+ while [ $count -lt 10 ]
+ do
+ rbd du|grep test1|tr -s " "|cut -d " " -f 4-5|grep '^4 GiB' && ret=$?
+ if [ "$ret" = "0" ]
+ then
+ break;
+ fi
+ count=`expr $count + 1`
+ sleep 2
+ done
+ rbd du
+ if [ "$ret" != "0" ]
+ then
+ exit 1
+ fi
+ rbd rm test1
+ rbd ls | grep test1 | wc -l | grep '^0$'
+}
+
+test_namespace() {
+ echo "testing namespace..."
+ remove_images
+
+ rbd namespace ls | wc -l | grep '^0$'
+ rbd namespace create rbd/test1
+ rbd namespace create --pool rbd --namespace test2
+ rbd namespace create --namespace test3
+ expect_fail rbd namespace create rbd/test3
+
+ rbd namespace list | grep 'test' | wc -l | grep '^3$'
+
+ expect_fail rbd namespace remove --pool rbd missing
+
+ rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image1
+
+ # default test1 ns to test2 ns clone
+ rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/test1/image1
+ rbd snap create rbd/test1/image1@1
+ rbd clone --rbd-default-clone-format 2 rbd/test1/image1@1 rbd/test2/image1
+ rbd snap rm rbd/test1/image1@1
+ cmp <(rbd export rbd/test1/image1 -) <(rbd export rbd/test2/image1 -)
+ rbd rm rbd/test2/image1
+
+ # default ns to test1 ns clone
+ rbd create $RBD_CREATE_ARGS --size 1G rbd/image2
+ rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/image2
+ rbd snap create rbd/image2@1
+ rbd clone --rbd-default-clone-format 2 rbd/image2@1 rbd/test2/image2
+ rbd snap rm rbd/image2@1
+ cmp <(rbd export rbd/image2 -) <(rbd export rbd/test2/image2 -)
+ expect_fail rbd rm rbd/image2
+ rbd rm rbd/test2/image2
+ rbd rm rbd/image2
+
+ # v1 clones are supported within the same namespace
+ rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image3
+ rbd snap create rbd/test1/image3@1
+ rbd snap protect rbd/test1/image3@1
+ rbd clone --rbd-default-clone-format 1 rbd/test1/image3@1 rbd/test1/image4
+ rbd rm rbd/test1/image4
+ rbd snap unprotect rbd/test1/image3@1
+ rbd snap rm rbd/test1/image3@1
+ rbd rm rbd/test1/image3
+
+ rbd create $RBD_CREATE_ARGS --size 1G --namespace test1 image2
+ expect_fail rbd namespace remove rbd/test1
+
+ rbd group create rbd/test1/group1
+ rbd group image add rbd/test1/group1 rbd/test1/image1
+ rbd group rm rbd/test1/group1
+
+ rbd trash move rbd/test1/image1
+ ID=`rbd trash --namespace test1 ls | cut -d ' ' -f 1`
+ rbd trash rm rbd/test1/${ID}
+
+ rbd remove rbd/test1/image2
+
+ rbd namespace remove --pool rbd --namespace test1
+ rbd namespace remove --namespace test3
+
+ rbd namespace list | grep 'test' | wc -l | grep '^1$'
+ rbd namespace remove rbd/test2
+}
+
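+# extract the migration state (e.g. prepared, executing, executed) from
+# the XML output of 'rbd status'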
+get_migration_state() {
+ local image=$1
+
+ rbd --format xml status $image |
+ $XMLSTARLET sel -t -v '//status/migration/state'
+}
+
+test_migration() {
+ echo "testing migration..."
+ remove_images
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+
+ # Convert to new format
+ rbd create --image-format 1 -s 128M test1
+ rbd info test1 | grep 'format: 1'
+ rbd migration prepare test1 --image-format 2
+ test "$(get_migration_state test1)" = prepared
+ rbd info test1 | grep 'format: 2'
+ rbd rm test1 && exit 1 || true
+ rbd migration execute test1
+ test "$(get_migration_state test1)" = executed
+ rbd migration commit test1
+ get_migration_state test1 && exit 1 || true
+
+ # Enable layering (and some other features)
+ rbd info test1 | grep 'features: .*layering' && exit 1 || true
+ rbd migration prepare test1 --image-feature \
+ layering,exclusive-lock,object-map,fast-diff,deep-flatten
+ rbd info test1 | grep 'features: .*layering'
+ rbd migration execute test1
+ rbd migration commit test1
+
+ # Migration to other pool
+ rbd migration prepare test1 rbd2/test1
+ test "$(get_migration_state rbd2/test1)" = prepared
+ rbd ls | wc -l | grep '^0$'
+ rbd -p rbd2 ls | grep test1
+ rbd migration execute test1
+ test "$(get_migration_state rbd2/test1)" = executed
+ rbd rm rbd2/test1 && exit 1 || true
+ rbd migration commit test1
+
+ # Migration to other namespace
+ rbd namespace create rbd2/ns1
+ rbd namespace create rbd2/ns2
+ rbd migration prepare rbd2/test1 rbd2/ns1/test1
+ test "$(get_migration_state rbd2/ns1/test1)" = prepared
+ rbd migration execute rbd2/test1
+ test "$(get_migration_state rbd2/ns1/test1)" = executed
+ rbd migration commit rbd2/test1
+ rbd migration prepare rbd2/ns1/test1 rbd2/ns2/test1
+ rbd migration execute rbd2/ns2/test1
+ rbd migration commit rbd2/ns2/test1
+
+ # Enable data pool
+ rbd create -s 128M test1
+ rbd migration prepare test1 --data-pool rbd2
+ rbd info test1 | grep 'data_pool: rbd2'
+ rbd migration execute test1
+ rbd migration commit test1
+
+ # testing trash
+ rbd migration prepare test1
+ expect_fail rbd trash mv test1
+ ID=`rbd trash ls -a | cut -d ' ' -f 1`
+ expect_fail rbd trash rm $ID
+ expect_fail rbd trash restore $ID
+ rbd migration abort test1
+
+ # Migrate parent
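+ # (migration must transparently update the parent spec recorded in each clone)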
+ rbd remove test1
+ dd if=/dev/urandom bs=1M count=1 | rbd --image-format 2 import - test1
+ md5sum=$(rbd export test1 - | md5sum)
+ rbd snap create test1@snap1
+ rbd snap protect test1@snap1
+ rbd snap create test1@snap2
+ rbd clone test1@snap1 clone_v1 --rbd_default_clone_format=1
+ rbd clone test1@snap2 clone_v2 --rbd_default_clone_format=2
+ rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1'
+ rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2'
+ rbd info clone_v2 |grep 'op_features: clone-child'
+ test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
+ test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
+ test "$(rbd children test1@snap1)" = "rbd/clone_v1"
+ test "$(rbd children test1@snap2)" = "rbd/clone_v2"
+ rbd migration prepare test1 rbd2/test2
+ rbd info clone_v1 | fgrep 'parent: rbd2/test2@snap1'
+ rbd info clone_v2 | fgrep 'parent: rbd2/test2@snap2'
+ rbd info clone_v2 | fgrep 'op_features: clone-child'
+ test "$(rbd children rbd2/test2@snap1)" = "rbd/clone_v1"
+ test "$(rbd children rbd2/test2@snap2)" = "rbd/clone_v2"
+ rbd migration execute test1
+ expect_fail rbd migration commit test1
+ rbd migration commit test1 --force
+ test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
+ test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
+ rbd migration prepare rbd2/test2 test1
+ rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1'
+ rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2'
+ rbd info clone_v2 | fgrep 'op_features: clone-child'
+ test "$(rbd children test1@snap1)" = "rbd/clone_v1"
+ test "$(rbd children test1@snap2)" = "rbd/clone_v2"
+ rbd migration execute test1
+ expect_fail rbd migration commit test1
+ rbd migration commit test1 --force
+ test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}"
+ test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}"
+ rbd remove clone_v1
+ rbd remove clone_v2
+ rbd snap unprotect test1@snap1
+ rbd snap purge test1
+ rbd rm test1
+
+ for format in 1 2; do
+ # Abort migration after successful prepare
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 --data-pool rbd2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # Abort migration after successful execute
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 --data-pool rbd2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd migration execute test2
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # Migration is automatically aborted if prepare failed
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 --data-pool INVALID_DATA_POOL && exit 1 || true
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # Abort migration to other pool
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 rbd2/test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/test2
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ # The same but abort using destination image
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 rbd2/test2
+ rbd migration abort rbd2/test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+
+ test $format = 1 && continue
+
+ # Abort migration to other namespace
+ rbd create -s 128M --image-format ${format} test2
+ rbd migration prepare test2 rbd2/ns1/test3
+ rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/ns1/test3
+ rbd migration abort test2
+ rbd bench --io-type write --io-size 1024 --io-total 1024 test2
+ rbd rm test2
+ done
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_config() {
+ echo "testing config..."
+ remove_images
+
+ expect_fail rbd config global set osd rbd_cache true
+ expect_fail rbd config global set global debug_ms 10
+ expect_fail rbd config global set global rbd_UNKNOWN false
+ expect_fail rbd config global set global rbd_cache INVALID
+ rbd config global set global rbd_cache false
+ rbd config global set client rbd_cache true
+ rbd config global set client.123 rbd_cache false
+ rbd config global get global rbd_cache | grep '^false$'
+ rbd config global get client rbd_cache | grep '^true$'
+ rbd config global get client.123 rbd_cache | grep '^false$'
+ expect_fail rbd config global get client.UNKNOWN rbd_cache
+ rbd config global list global | grep '^rbd_cache * false * global *$'
+ rbd config global list client | grep '^rbd_cache * true * client *$'
+ rbd config global list client.123 | grep '^rbd_cache * false * client.123 *$'
+ rbd config global list client.UNKNOWN | grep '^rbd_cache * true * client *$'
+ rbd config global rm client rbd_cache
+ expect_fail rbd config global get client rbd_cache
+ rbd config global list client | grep '^rbd_cache * false * global *$'
+ rbd config global rm client.123 rbd_cache
+ rbd config global rm global rbd_cache
+
+ rbd config pool set rbd rbd_cache true
+ rbd config pool list rbd | grep '^rbd_cache * true * pool *$'
+ rbd config pool get rbd rbd_cache | grep '^true$'
+
+ rbd create $RBD_CREATE_ARGS -s 1 test1
+
+ rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$'
+ rbd config image set rbd/test1 rbd_cache false
+ rbd config image list rbd/test1 | grep '^rbd_cache * false * image *$'
+ rbd config image get rbd/test1 rbd_cache | grep '^false$'
+ rbd config image remove rbd/test1 rbd_cache
+ expect_fail rbd config image get rbd/test1 rbd_cache
+ rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$'
+
+ rbd config pool remove rbd rbd_cache
+ expect_fail rbd config pool get rbd rbd_cache
+ rbd config pool list rbd | grep '^rbd_cache * true * config *$'
+
+ rbd rm test1
+}
+
+test_trash_purge_schedule() {
+ echo "testing trash purge schedule..."
+ remove_images
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd namespace create rbd2/ns1
+
+ test "$(ceph rbd trash purge schedule list)" = "{}"
+ ceph rbd trash purge schedule status | fgrep '"scheduled": []'
+
+ expect_fail rbd trash purge schedule ls
+ test "$(rbd trash purge schedule ls -R --format json)" = "[]"
+
+ rbd trash purge schedule add -p rbd 1d 01:30
+
+ rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
+ expect_fail rbd trash purge schedule ls
+ rbd trash purge schedule ls -R | grep 'every 1d starting at 01:30'
+ rbd trash purge schedule ls -R -p rbd | grep 'every 1d starting at 01:30'
+ expect_fail rbd trash purge schedule ls -p rbd2
+ test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
+
+ rbd trash purge schedule add -p rbd2/ns1 2d
+ test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" != "[]"
+ rbd trash purge schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *every 2d'
+ rbd trash purge schedule rm -p rbd2/ns1
+ test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
+
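+ # give the rbd_support mgr module up to ~2 minutes to pick up the schedule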
+ for i in `seq 12`; do
+ test "$(rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd' && break
+ sleep 10
+ done
+ rbd trash purge schedule status
+ test "$(rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
+ test "$(rbd trash purge schedule status -p rbd --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
+
+ rbd trash purge schedule add 2d 00:17
+ rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls -R | grep 'every 2d starting at 00:17'
+ expect_fail rbd trash purge schedule ls -p rbd2
+ rbd trash purge schedule ls -p rbd2 -R | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls -p rbd2/ns1 -R | grep 'every 2d starting at 00:17'
+ test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/pool')" = "-"
+ test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/namespace')" = "-"
+ test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/items/item/start_time')" = "00:17:00"
+
+ for i in `seq 12`; do
+ rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool' | grep 'rbd2' && break
+ sleep 10
+ done
+ rbd trash purge schedule status
+ rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool' | grep 'rbd2'
+ echo $(rbd trash purge schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool') | grep 'rbd rbd2 rbd2'
+ test "$(rbd trash purge schedule status -p rbd --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool')" = 'rbd'
+ test "$(echo $(rbd trash purge schedule status -p rbd2 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled/item/pool'))" = 'rbd2 rbd2'
+
+ test "$(echo $(rbd trash purge schedule ls -R --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/items'))" = "2d00:17:00 1d01:30:00"
+
+ rbd trash purge schedule add 1d
+ rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls | grep 'every 1d'
+
+ rbd trash purge schedule ls -R --format xml |
+ $XMLSTARLET sel -t -v '//schedules/schedule/items' | grep '2d00:17'
+
+ rbd trash purge schedule rm 1d
+ rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule rm 2d 00:17
+ expect_fail rbd trash purge schedule ls
+
+ for p in rbd2 rbd2/ns1; do
+ rbd create $RBD_CREATE_ARGS -s 1 rbd2/ns1/test1
+ rbd trash mv rbd2/ns1/test1
+ rbd trash ls rbd2/ns1 | wc -l | grep '^1$'
+
+ rbd trash purge schedule add -p $p 1m
+ rbd trash purge schedule list -p rbd2 -R | grep 'every 1m'
+ rbd trash purge schedule list -p rbd2/ns1 -R | grep 'every 1m'
+
+ for i in `seq 12`; do
+ rbd trash ls rbd2/ns1 | wc -l | grep '^1$' || break
+ sleep 10
+ done
+ rbd trash ls rbd2/ns1 | wc -l | grep '^0$'
+
+ # repeat after the schedule has kicked in; see https://tracker.ceph.com/issues/53915
+ rbd trash purge schedule list -p rbd2 -R | grep 'every 1m'
+ rbd trash purge schedule list -p rbd2/ns1 -R | grep 'every 1m'
+
+ rbd trash purge schedule status | grep 'rbd2 *ns1'
+ rbd trash purge schedule status -p rbd2 | grep 'rbd2 *ns1'
+ rbd trash purge schedule status -p rbd2/ns1 | grep 'rbd2 *ns1'
+
+ rbd trash purge schedule rm -p $p 1m
+ done
+
+ # Negative tests
+ rbd trash purge schedule add 2m
+ expect_fail rbd trash purge schedule add -p rbd dummy
+ expect_fail rbd trash purge schedule add dummy
+ expect_fail rbd trash purge schedule remove -p rbd dummy
+ expect_fail rbd trash purge schedule remove dummy
+ rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
+ rbd trash purge schedule ls | grep 'every 2m'
+ rbd trash purge schedule remove -p rbd 1d 01:30
+ rbd trash purge schedule remove 2m
+ test "$(rbd trash purge schedule ls -R --format json)" = "[]"
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_trash_purge_schedule_recovery() {
+ echo "testing recovery of trash_purge_schedule handler after module's RADOS client is blocklisted..."
+ remove_images
+ ceph osd pool create rbd3 8
+ rbd pool init rbd3
+ rbd namespace create rbd3/ns1
+
+ rbd trash purge schedule add -p rbd3/ns1 2d
+ rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'
+
+ # Fetch and blocklist the rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
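+ # (blocklisting evicts the module's RADOS client; the module is expected to
+ # create a new client and re-register its handlers to recover)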
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ # Check that you can add a trash purge schedule after a few retries
+ expect_fail rbd trash purge schedule add -p rbd3 10m
+ sleep 10
+ for i in `seq 24`; do
+ rbd trash purge schedule add -p rbd3 10m && break
+ sleep 10
+ done
+
+ rbd trash purge schedule ls -p rbd3 -R | grep 'every 10m'
+ # Verify that the schedule present before client blocklisting is preserved
+ rbd trash purge schedule ls -p rbd3 -R | grep 'rbd3 *ns1 *every 2d'
+
+ rbd trash purge schedule remove -p rbd3 10m
+ rbd trash purge schedule remove -p rbd3/ns1 2d
+ rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'every 10m'
+ rbd trash purge schedule ls -p rbd3 -R | expect_fail grep 'rbd3 *ns1 *every 2d'
+
+ ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
+
+}
+
+test_mirror_snapshot_schedule() {
+ echo "testing mirror snapshot schedule..."
+ remove_images
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd namespace create rbd2/ns1
+
+ rbd mirror pool enable rbd2 image
+ rbd mirror pool enable rbd2/ns1 image
+ rbd mirror pool peer add rbd2 cluster1
+
+ test "$(ceph rbd mirror snapshot schedule list)" = "{}"
+ ceph rbd mirror snapshot schedule status | fgrep '"scheduled_images": []'
+
+ expect_fail rbd mirror snapshot schedule ls
+ test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]"
+
+ rbd create $RBD_CREATE_ARGS -s 1 rbd2/ns1/test1
+
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" = '0'
+
+ rbd mirror image enable rbd2/ns1/test1 snapshot
+
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" = '1'
+
+ rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 1m
+ expect_fail rbd mirror snapshot schedule ls
+ rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ for i in `seq 12`; do
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" -gt '1' && break
+ sleep 10
+ done
+
+ test "$(rbd mirror image status rbd2/ns1/test1 |
+ grep -c mirror.primary)" -gt '1'
+
+ # repeat after the schedule has kicked in; see https://tracker.ceph.com/issues/53915
+ expect_fail rbd mirror snapshot schedule ls
+ rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ rbd mirror snapshot schedule status
+ test "$(rbd mirror snapshot schedule status --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+ test "$(rbd mirror snapshot schedule status -p rbd2 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+ test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+ test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --image test1 --format xml |
+ $XMLSTARLET sel -t -v '//scheduled_images/image/image')" = 'rbd2/ns1/test1'
+
+ rbd mirror image demote rbd2/ns1/test1
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break
+ sleep 10
+ done
+ rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1'
+
+ rbd mirror image promote rbd2/ns1/test1
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' && break
+ sleep 10
+ done
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1'
+
+ rbd mirror snapshot schedule add 1h 00:15
+ test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ # Negative tests
+ expect_fail rbd mirror snapshot schedule add dummy
+ expect_fail rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 dummy
+ expect_fail rbd mirror snapshot schedule remove dummy
+ expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 dummy
+ test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
+
+ rbd rm rbd2/ns1/test1
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1' || break
+ sleep 10
+ done
+ rbd mirror snapshot schedule status | expect_fail grep 'rbd2/ns1/test1'
+
+ rbd mirror snapshot schedule remove
+ test "$(rbd mirror snapshot schedule ls -R --format json)" = "[]"
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_mirror_snapshot_schedule_recovery() {
+ echo "testing recovery of mirror snapshot scheduler after module's RADOS client is blocklisted..."
+ remove_images
+ ceph osd pool create rbd3 8
+ rbd pool init rbd3
+ rbd namespace create rbd3/ns1
+
+ rbd mirror pool enable rbd3 image
+ rbd mirror pool enable rbd3/ns1 image
+ rbd mirror pool peer add rbd3 cluster1
+
+ rbd create $RBD_CREATE_ARGS -s 1 rbd3/ns1/test1
+ rbd mirror image enable rbd3/ns1/test1 snapshot
+ test "$(rbd mirror image status rbd3/ns1/test1 |
+ grep -c mirror.primary)" = '1'
+
+ rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 1m
+ test "$(rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1)" = 'every 1m'
+
+ # Fetch and blocklist rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ # Check that you can add a mirror snapshot schedule after a few retries
+ expect_fail rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m
+ sleep 10
+ for i in `seq 24`; do
+ rbd mirror snapshot schedule add -p rbd3/ns1 --image test1 2m && break
+ sleep 10
+ done
+
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 2m'
+ # Verify that the schedule present before client blocklisting is preserved
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | grep 'every 1m'
+
+ rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 2m
+ rbd mirror snapshot schedule rm -p rbd3/ns1 --image test1 1m
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 2m'
+ rbd mirror snapshot schedule ls -p rbd3/ns1 --image test1 | expect_fail grep 'every 1m'
+
+ rbd snap purge rbd3/ns1/test1
+ rbd rm rbd3/ns1/test1
+ ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
+}
+
+test_perf_image_iostat() {
+ echo "testing perf image iostat..."
+ remove_images
+
+ ceph osd pool create rbd1 8
+ rbd pool init rbd1
+ rbd namespace create rbd1/ns
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd namespace create rbd2/ns
+
+ IMAGE_SPECS=("test1" "rbd1/test2" "rbd1/ns/test3" "rbd2/test4" "rbd2/ns/test5")
+ for spec in "${IMAGE_SPECS[@]}"; do
+ # ensure all images are created without a separate data pool
+ # as we filter iostat by specific pool specs below
+ rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
+ done
+
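+ # run a background writer against each image so iostat has I/O to report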
+ BENCH_PIDS=()
+ for spec in "${IMAGE_SPECS[@]}"; do
+ rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
+ --rbd-cache false $spec >/dev/null 2>&1 &
+ BENCH_PIDS+=($!)
+ done
+
+ # test specifying pool spec via spec syntax
+ test "$(rbd perf image iostat --format json rbd1 |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test2'
+ test "$(rbd perf image iostat --format json rbd1/ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test3'
+ test "$(rbd perf image iostat --format json --rbd-default-pool rbd1 /ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test3'
+
+ # test specifying pool spec via options
+ test "$(rbd perf image iostat --format json --pool rbd2 |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test4'
+ test "$(rbd perf image iostat --format json --pool rbd2 --namespace ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test5'
+ test "$(rbd perf image iostat --format json --rbd-default-pool rbd2 --namespace ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test5'
+
+ # test omitting pool spec (-> GLOBAL_POOL_KEY)
+ test "$(rbd perf image iostat --format json |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test1 test2 test3 test4 test5'
+
+ for pid in "${BENCH_PIDS[@]}"; do
+ kill $pid
+ done
+ wait
+
+ remove_images
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+ ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
+}
+
+test_perf_image_iostat_recovery() {
+ echo "testing recovery of perf handler after module's RADOS client is blocklisted..."
+ remove_images
+
+ ceph osd pool create rbd3 8
+ rbd pool init rbd3
+ rbd namespace create rbd3/ns
+
+ IMAGE_SPECS=("rbd3/test1" "rbd3/ns/test2")
+ for spec in "${IMAGE_SPECS[@]}"; do
+ # ensure all images are created without a separate data pool
+ # as we filter iostat by specific pool specs below
+ rbd create $RBD_CREATE_ARGS --size 10G --rbd-default-data-pool '' $spec
+ done
+
+ BENCH_PIDS=()
+ for spec in "${IMAGE_SPECS[@]}"; do
+ rbd bench --io-type write --io-pattern rand --io-total 10G --io-threads 1 \
+ --rbd-cache false $spec >/dev/null 2>&1 &
+ BENCH_PIDS+=($!)
+ done
+
+ test "$(rbd perf image iostat --format json rbd3 |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test1'
+
+ # Fetch and blocklist the rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ expect_fail rbd perf image iostat --format json rbd3/ns
+ sleep 10
+ for i in `seq 24`; do
+ test "$(rbd perf image iostat --format json rbd3/ns |
+ jq -r 'map(.image) | sort | join(" ")')" = 'test2' && break
+ sleep 10
+ done
+
+ for pid in "${BENCH_PIDS[@]}"; do
+ kill $pid
+ done
+ wait
+
+ remove_images
+ ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
+}
+
+test_mirror_pool_peer_bootstrap_create() {
+ echo "testing mirror pool peer bootstrap create..."
+ remove_images
+
+ ceph osd pool create rbd1 8
+ rbd pool init rbd1
+ rbd mirror pool enable rbd1 image
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+ rbd mirror pool enable rbd2 pool
+
+ readarray -t MON_ADDRS < <(ceph mon dump |
+ sed -n 's/^[0-9]: \(.*\) mon\.[a-z]$/\1/p')
+
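+    # ("ceph mon dump" prints lines like "0: <addrs> mon.a"; the sed pattern
+    # above keeps just the address part)
+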
+ # check that all monitors make it to the token even if only one
+ # valid monitor is specified
+ BAD_MON_ADDR="1.2.3.4:6789"
+ MON_HOST="${MON_ADDRS[0]},$BAD_MON_ADDR"
+ TOKEN="$(rbd mirror pool peer bootstrap create \
+ --mon-host "$MON_HOST" rbd1 | base64 -d)"
+ TOKEN_FSID="$(jq -r '.fsid' <<< "$TOKEN")"
+ TOKEN_CLIENT_ID="$(jq -r '.client_id' <<< "$TOKEN")"
+ TOKEN_KEY="$(jq -r '.key' <<< "$TOKEN")"
+ TOKEN_MON_HOST="$(jq -r '.mon_host' <<< "$TOKEN")"
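+    # (a decoded token is a small JSON object with "fsid", "client_id",
+    # "key" and "mon_host" fields, which are validated below)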
+
+ test "$TOKEN_FSID" = "$(ceph fsid)"
+ test "$TOKEN_KEY" = "$(ceph auth get-key client.$TOKEN_CLIENT_ID)"
+ for addr in "${MON_ADDRS[@]}"; do
+ fgrep "$addr" <<< "$TOKEN_MON_HOST"
+ done
+ expect_fail fgrep "$BAD_MON_ADDR" <<< "$TOKEN_MON_HOST"
+
+ # check that the token does not change, including across pools
+ test "$(rbd mirror pool peer bootstrap create \
+ --mon-host "$MON_HOST" rbd1 | base64 -d)" = "$TOKEN"
+ test "$(rbd mirror pool peer bootstrap create \
+ rbd1 | base64 -d)" = "$TOKEN"
+ test "$(rbd mirror pool peer bootstrap create \
+ --mon-host "$MON_HOST" rbd2 | base64 -d)" = "$TOKEN"
+ test "$(rbd mirror pool peer bootstrap create \
+ rbd2 | base64 -d)" = "$TOKEN"
+
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+ ceph osd pool rm rbd1 rbd1 --yes-i-really-really-mean-it
+}
+
+test_tasks_removed_pool() {
+ echo "testing removing pool under running tasks..."
+ remove_images
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+
+ rbd create $RBD_CREATE_ARGS --size 1G foo
+ rbd snap create foo@snap
+ rbd snap protect foo@snap
+ rbd clone foo@snap bar
+
+ rbd create $RBD_CREATE_ARGS --size 1G rbd2/dummy
+ rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/dummy
+ rbd snap create rbd2/dummy@snap
+ rbd snap protect rbd2/dummy@snap
+ for i in {1..5}; do
+ rbd clone rbd2/dummy@snap rbd2/dummy$i
+ done
+
+ # queue flattens on a few dummy images and remove that pool
+ test "$(ceph rbd task list)" = "[]"
+ for i in {1..5}; do
+ ceph rbd task add flatten rbd2/dummy$i
+ done
+ ceph osd pool delete rbd2 rbd2 --yes-i-really-really-mean-it
+ test "$(ceph rbd task list)" != "[]"
+
+ # queue flatten on another image and check that it completes
+ rbd info bar | grep 'parent: '
+ expect_fail rbd snap unprotect foo@snap
+ ceph rbd task add flatten bar
+ for i in {1..12}; do
+ rbd info bar | grep 'parent: ' || break
+ sleep 10
+ done
+ rbd info bar | expect_fail grep 'parent: '
+ rbd snap unprotect foo@snap
+
+ # check that flattens disrupted by pool removal are cleaned up
+ for i in {1..12}; do
+ test "$(ceph rbd task list)" = "[]" && break
+ sleep 10
+ done
+ test "$(ceph rbd task list)" = "[]"
+
+ remove_images
+}
+
+test_tasks_recovery() {
+ echo "testing task handler recovery after module's RADOS client is blocklisted..."
+ remove_images
+
+ ceph osd pool create rbd2 8
+ rbd pool init rbd2
+
+ rbd create $RBD_CREATE_ARGS --size 1G rbd2/img1
+ rbd bench --io-type write --io-pattern seq --io-size 1M --io-total 1G rbd2/img1
+ rbd snap create rbd2/img1@snap
+ rbd snap protect rbd2/img1@snap
+ rbd clone rbd2/img1@snap rbd2/clone1
+
+ # Fetch and blocklist rbd_support module's RADOS client
+ CLIENT_ADDR=$(ceph mgr dump | jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ ceph osd blocklist add $CLIENT_ADDR
+ ceph osd blocklist ls | grep $CLIENT_ADDR
+
+ expect_fail ceph rbd task add flatten rbd2/clone1
+ sleep 10
+ for i in `seq 24`; do
+ ceph rbd task add flatten rbd2/clone1 && break
+ sleep 10
+ done
+ test "$(ceph rbd task list)" != "[]"
+
+ for i in {1..12}; do
+ rbd info rbd2/clone1 | grep 'parent: ' || break
+ sleep 10
+ done
+ rbd info rbd2/clone1 | expect_fail grep 'parent: '
+ rbd snap unprotect rbd2/img1@snap
+
+ test "$(ceph rbd task list)" = "[]"
+ ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
+test_pool_image_args
+test_rename
+test_ls
+test_remove
+test_migration
+test_config
+RBD_CREATE_ARGS=""
+test_others
+test_locking
+test_thick_provision
+RBD_CREATE_ARGS="--image-format 2"
+test_others
+test_locking
+test_clone
+test_trash
+test_purge
+test_deep_copy_clone
+test_clone_v2
+test_thick_provision
+test_namespace
+test_trash_purge_schedule
+test_trash_purge_schedule_recovery
+test_mirror_snapshot_schedule
+test_mirror_snapshot_schedule_recovery
+test_perf_image_iostat
+test_perf_image_iostat_recovery
+test_mirror_pool_peer_bootstrap_create
+test_tasks_removed_pool
+test_tasks_recovery
+
+echo OK
diff --git a/qa/workunits/rbd/cli_migration.sh b/qa/workunits/rbd/cli_migration.sh
new file mode 100755
index 000000000..be8e031fd
--- /dev/null
+++ b/qa/workunits/rbd/cli_migration.sh
@@ -0,0 +1,357 @@
+#!/usr/bin/env bash
+set -ex
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+TEMPDIR=
+IMAGE1=image1
+IMAGE2=image2
+IMAGE3=image3
+IMAGES="${IMAGE1} ${IMAGE2} ${IMAGE3}"
+
+cleanup() {
+ cleanup_tempdir
+ remove_images
+}
+
+setup_tempdir() {
+ TEMPDIR=`mktemp -d`
+}
+
+cleanup_tempdir() {
+ rm -rf ${TEMPDIR}
+}
+
+create_base_image() {
+ local image=$1
+
+ rbd create --size 1G ${image}
+ rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 256M ${image}
+ rbd snap create ${image}@1
+ rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 64M ${image}
+ rbd snap create ${image}@2
+ rbd bench --io-type write --io-pattern rand --io-size=4K --io-total 128M ${image}
+}
+
+export_raw_image() {
+ local image=$1
+
+ rm -rf "${TEMPDIR}/${image}"
+ rbd export ${image} "${TEMPDIR}/${image}"
+}
+
+export_base_image() {
+ local image=$1
+
+ export_raw_image "${image}"
+ export_raw_image "${image}@1"
+ export_raw_image "${image}@2"
+}
+
+remove_image() {
+ local image=$1
+
+ (rbd migration abort $image || true) >/dev/null 2>&1
+ (rbd snap purge $image || true) >/dev/null 2>&1
+ (rbd rm $image || true) >/dev/null 2>&1
+}
+
+remove_images() {
+ for image in ${IMAGES}
+ do
+ remove_image ${image}
+ done
+}
+
+show_diff() {
+ local file1=$1
+ local file2=$2
+
+ xxd "${file1}" > "${file1}.xxd"
+ xxd "${file2}" > "${file2}.xxd"
+ sdiff -s "${file1}.xxd" "${file2}.xxd" | head -n 64
+ rm -f "${file1}.xxd" "${file2}.xxd"
+}
+
+compare_images() {
+ local src_image=$1
+ local dst_image=$2
+ local ret=0
+
+ export_raw_image ${dst_image}
+ if ! cmp "${TEMPDIR}/${src_image}" "${TEMPDIR}/${dst_image}"
+ then
+ show_diff "${TEMPDIR}/${src_image}" "${TEMPDIR}/${dst_image}"
+ ret=1
+ fi
+ return ${ret}
+}
+
+test_import_native_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ rbd migration prepare --import-only "rbd/${base_image}@2" ${dest_image}
+ rbd migration abort ${dest_image}
+
+ local pool_id=$(ceph osd pool ls detail --format xml | xmlstarlet sel -t -v "//pools/pool[pool_name='rbd']/pool_id")
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "native",
+ "pool_id": ${pool_id},
+ "pool_namespace": "",
+ "image_name": "${base_image}",
+ "snap_name": "2"
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@1"
+ compare_images "${base_image}@2" "${dest_image}@2"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@1"
+ compare_images "${base_image}@2" "${dest_image}@2"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec "{\"type\": \"native\", \"pool_id\": "${pool_id}", \"image_name\": \"${base_image}\", \"snap_name\": \"2\"}" \
+ ${dest_image}
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec "{\"type\": \"native\", \"pool_name\": \"rbd\", \"image_name\": \"${base_image}\", \"snap_name\": \"2\"}" \
+ ${dest_image}
+ rbd migration execute ${dest_image}
+ rbd migration commit ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@1"
+ compare_images "${base_image}@2" "${dest_image}@2"
+
+ remove_image "${dest_image}"
+}
+
+test_import_qcow_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ if ! qemu-img convert -f raw -O qcow rbd:rbd/${base_image} ${TEMPDIR}/${base_image}.qcow; then
+ echo "skipping QCOW test"
+ return 0
+ fi
+ qemu-img info -f qcow ${TEMPDIR}/${base_image}.qcow
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "qcow",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}.qcow"
+ }
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ set +e
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+ local error_code=$?
+ set -e
+
+ if [ $error_code -eq 95 ]; then
+ echo "skipping QCOW test (librbd support disabled)"
+ return 0
+ fi
+ test $error_code -eq 0
+
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration commit ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}"
+
+ remove_image "${dest_image}"
+}
+
+test_import_qcow2_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ # create new image via qemu-img and its bench tool since we cannot
+ # import snapshot deltas into QCOW2
+ qemu-img create -f qcow2 ${TEMPDIR}/${base_image}.qcow2 1G
+
+ qemu-img bench -f qcow2 -w -c 65536 -d 16 --pattern 65 -s 4096 \
+ -S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
+ qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
+ "${TEMPDIR}/${base_image}@snap1"
+ qemu-img snapshot -c "snap1" ${TEMPDIR}/${base_image}.qcow2
+
+ qemu-img bench -f qcow2 -w -c 16384 -d 16 --pattern 66 -s 4096 \
+ -S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
+ qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
+ "${TEMPDIR}/${base_image}@snap2"
+ qemu-img snapshot -c "snap2" ${TEMPDIR}/${base_image}.qcow2
+
+ qemu-img bench -f qcow2 -w -c 32768 -d 16 --pattern 67 -s 4096 \
+ -S $((($RANDOM % 262144) * 4096)) ${TEMPDIR}/${base_image}.qcow2
+ qemu-img convert -f qcow2 -O raw ${TEMPDIR}/${base_image}.qcow2 \
+ ${TEMPDIR}/${base_image}
+
+ qemu-img info -f qcow2 ${TEMPDIR}/${base_image}.qcow2
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "qcow",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}.qcow2"
+ }
+}
+EOF
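+    # note: the source-spec type is "qcow" even for a QCOW2 file; the QCOW
+    # version is detected from the file header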
+ cat ${TEMPDIR}/spec.json
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ rbd migration commit ${dest_image}
+
+ compare_images "${base_image}@snap1" "${dest_image}@snap1"
+ compare_images "${base_image}@snap2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}"
+
+ remove_image "${dest_image}"
+}
+
+test_import_raw_format() {
+ local base_image=$1
+ local dest_image=$2
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "raw",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}"
+ }
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ cat ${TEMPDIR}/spec.json | rbd migration prepare --import-only \
+ --source-spec-path - ${dest_image}
+ compare_images ${base_image} ${dest_image}
+ rbd migration abort ${dest_image}
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+ rbd migration execute ${dest_image}
+ rbd migration commit ${dest_image}
+
+ compare_images ${base_image} ${dest_image}
+
+ remove_image "${dest_image}"
+
+ cat > ${TEMPDIR}/spec.json <<EOF
+{
+ "type": "raw",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}"
+ },
+ "snapshots": [{
+ "type": "raw",
+ "name": "snap1",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}@1"
+ }
+ }, {
+ "type": "raw",
+ "name": "snap2",
+ "stream": {
+ "type": "file",
+ "file_path": "${TEMPDIR}/${base_image}@2"
+ }
+ }]
+}
+EOF
+ cat ${TEMPDIR}/spec.json
+
+ rbd migration prepare --import-only \
+ --source-spec-path ${TEMPDIR}/spec.json ${dest_image}
+
+ rbd snap create ${dest_image}@head
+ rbd bench --io-type write --io-pattern rand --io-size=32K --io-total=32M ${dest_image}
+
+ compare_images "${base_image}" "${dest_image}@head"
+ compare_images "${base_image}@1" "${dest_image}@snap1"
+ compare_images "${base_image}@2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}@head"
+
+ rbd migration execute ${dest_image}
+
+ compare_images "${base_image}@1" "${dest_image}@snap1"
+ compare_images "${base_image}@2" "${dest_image}@snap2"
+ compare_images "${base_image}" "${dest_image}@head"
+
+ rbd migration commit ${dest_image}
+
+ remove_image "${dest_image}"
+}
+
+# make sure rbd pool is EMPTY.. this is a test script!!
+rbd ls 2>&1 | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1
+
+setup_tempdir
+trap 'cleanup $?' INT TERM EXIT
+
+create_base_image ${IMAGE1}
+export_base_image ${IMAGE1}
+
+test_import_native_format ${IMAGE1} ${IMAGE2}
+test_import_qcow_format ${IMAGE1} ${IMAGE2}
+test_import_qcow2_format ${IMAGE2} ${IMAGE3}
+test_import_raw_format ${IMAGE1} ${IMAGE2}
+
+echo OK
diff --git a/qa/workunits/rbd/concurrent.sh b/qa/workunits/rbd/concurrent.sh
new file mode 100755
index 000000000..abaad75f5
--- /dev/null
+++ b/qa/workunits/rbd/concurrent.sh
@@ -0,0 +1,375 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2013 Inktank Storage, Inc.
+#
+# This is free software; see the source for copying conditions.
+# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE.
+#
+# This is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as
+# published by the Free Software Foundation version 2.
+
+# Alex Elder <elder@inktank.com>
+# January 29, 2013
+
+################################################################
+
+# The purpose of this test is to exercise paths through the rbd
+# code, making sure no bad pointer references or invalid reference
+# count operations occur in the face of concurrent activity.
+#
+# Each pass of the test creates an rbd image, maps it, and writes
+# some data into the image. It also reads some data from all of the
+# other images that exist at the time the pass executes. Finally,
+# the image is unmapped and removed. The image removal completes in
+# the background.
+#
+# An iteration of the test consists of performing some number of
+# passes, initiating each pass as a background job, and finally
+# sleeping for a variable delay. The delay is initially a specified
+# value, but each iteration shortens that proportionally, such that
+# the last iteration will not delay at all.
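+# (concretely, the sleep after iteration i works out to
+# DELAY * (1 - i / ITER) seconds; see the awk computation in the
+# main loop at the bottom of this script)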
+#
+# The result exercises concurrent creates and deletes of rbd images,
+# writes to new images, reads from both written and unwritten image
+# data (including reads concurrent with writes), and attempts to
+# unmap images being read.
+
+# Usage: concurrent [-i <iter>] [-c <count>] [-d <delay>]
+#
+# Exit status:
+# 0: success
+# 1: usage error
+# 2: other runtime error
+# 99: argument count error (programming error)
+# 100: getopt error (internal error)
+
+################################################################
+
+set -ex
+
+PROGNAME=$(basename $0)
+
+# Default flag values; RBD_CONCURRENT_ITER names are intended
+# to be used in yaml scripts to pass in alternate values, e.g.:
+# env:
+# RBD_CONCURRENT_ITER: 20
+# RBD_CONCURRENT_COUNT: 5
+# RBD_CONCURRENT_DELAY: 3
+ITER_DEFAULT=${RBD_CONCURRENT_ITER:-100}
+COUNT_DEFAULT=${RBD_CONCURRENT_COUNT:-5}
+DELAY_DEFAULT=${RBD_CONCURRENT_DELAY:-5} # seconds
+
+CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
+CEPH_ID=${CEPH_ID:-admin}
+SECRET_ARGS=""
+if [ "${CEPH_SECRET_FILE}" ]; then
+ SECRET_ARGS="--secret $CEPH_SECRET_FILE"
+fi
+
+################################################################
+
+function setup() {
+ ID_MAX_DIR=$(mktemp -d /tmp/image_max_id.XXXXX)
+ ID_COUNT_DIR=$(mktemp -d /tmp/image_ids.XXXXXX)
+ NAMES_DIR=$(mktemp -d /tmp/image_names.XXXXXX)
+ SOURCE_DATA=$(mktemp /tmp/source_data.XXXXXX)
+
+ # Use urandom to generate SOURCE_DATA
+ dd if=/dev/urandom of=${SOURCE_DATA} bs=2048 count=66 \
+ >/dev/null 2>&1
+
+ # List of rbd id's *not* created by this script
+ export INITIAL_RBD_IDS=$(ls /sys/bus/rbd/devices)
+
+ # Set up some environment for normal teuthology test setup.
+ # This really should not be necessary but I found it was.
+
+ export CEPH_ARGS=" --name client.0"
+}
+
+function cleanup() {
+ [ ! "${ID_MAX_DIR}" ] && return
+ local id
+ local image
+
+ # Unmap mapped devices
+ for id in $(rbd_ids); do
+ image=$(cat "/sys/bus/rbd/devices/${id}/name")
+ rbd_unmap_image "${id}"
+ rbd_destroy_image "${image}"
+ done
+ # Get any leftover images
+ for image in $(rbd ls 2>/dev/null); do
+ rbd_destroy_image "${image}"
+ done
+ wait
+ sync
+ rm -f "${SOURCE_DATA}"
+ [ -d "${NAMES_DIR}" ] && rmdir "${NAMES_DIR}"
+ echo "Max concurrent rbd image count was $(get_max "${ID_COUNT_DIR}")"
+ rm -rf "${ID_COUNT_DIR}"
+ echo "Max rbd image id was $(get_max "${ID_MAX_DIR}")"
+ rm -rf "${ID_MAX_DIR}"
+}
+
+function get_max() {
+ [ $# -eq 1 ] || exit 99
+ local dir="$1"
+
+ ls -U "${dir}" | sort -n | tail -1
+}
+
+trap cleanup HUP INT QUIT
+
+# print a usage message and quit
+#
+# if a message is supplied, print that first, and then exit
+# with non-zero status
+function usage() {
+ if [ $# -gt 0 ]; then
+ echo "" >&2
+ echo "$@" >&2
+ fi
+
+ echo "" >&2
+ echo "Usage: ${PROGNAME} <options> <tests>" >&2
+ echo "" >&2
+ echo " options:" >&2
+ echo " -h or --help" >&2
+ echo " show this message" >&2
+ echo " -i or --iterations" >&2
+ echo " iteration count (1 or more)" >&2
+ echo " -c or --count" >&2
+ echo " images created per iteration (1 or more)" >&2
+ echo " -d or --delay" >&2
+ echo " maximum delay between iterations" >&2
+ echo "" >&2
+ echo " defaults:" >&2
+ echo " iterations: ${ITER_DEFAULT}"
+ echo " count: ${COUNT_DEFAULT}"
+ echo " delay: ${DELAY_DEFAULT} (seconds)"
+ echo "" >&2
+
+ [ $# -gt 0 ] && exit 1
+
+ exit 0 # This is used for a --help
+}
+
+# parse command line arguments
+function parseargs() {
+ ITER="${ITER_DEFAULT}"
+ COUNT="${COUNT_DEFAULT}"
+ DELAY="${DELAY_DEFAULT}"
+
+ # Short option flags
+ SHORT_OPTS=""
+ SHORT_OPTS="${SHORT_OPTS},h"
+ SHORT_OPTS="${SHORT_OPTS},i:"
+ SHORT_OPTS="${SHORT_OPTS},c:"
+ SHORT_OPTS="${SHORT_OPTS},d:"
+
+	# Long option flags
+ LONG_OPTS=""
+ LONG_OPTS="${LONG_OPTS},help"
+ LONG_OPTS="${LONG_OPTS},iterations:"
+ LONG_OPTS="${LONG_OPTS},count:"
+ LONG_OPTS="${LONG_OPTS},delay:"
+
+ TEMP=$(getopt --name "${PROGNAME}" \
+ --options "${SHORT_OPTS}" \
+ --longoptions "${LONG_OPTS}" \
+ -- "$@")
+ eval set -- "$TEMP"
+
+ while [ "$1" != "--" ]; do
+ case "$1" in
+ -h|--help)
+ usage
+ ;;
+ -i|--iterations)
+ ITER="$2"
+ [ "${ITER}" -lt 1 ] &&
+ usage "bad iterations value"
+ shift
+ ;;
+ -c|--count)
+ COUNT="$2"
+ [ "${COUNT}" -lt 1 ] &&
+ usage "bad count value"
+ shift
+ ;;
+ -d|--delay)
+ DELAY="$2"
+ shift
+ ;;
+ *)
+ exit 100 # Internal error
+ ;;
+ esac
+ shift
+ done
+ shift
+}
+
+function rbd_ids() {
+ [ $# -eq 0 ] || exit 99
+ local ids
+ local i
+
+ [ -d /sys/bus/rbd ] || return
+ ids=" $(echo $(ls /sys/bus/rbd/devices)) "
+ for i in ${INITIAL_RBD_IDS}; do
+ ids=${ids/ ${i} / }
+ done
+ echo ${ids}
+}
+
+function update_maxes() {
+ local ids="$@"
+ local last_id
+ # These aren't 100% safe against concurrent updates but it
+ # should be pretty close
+ count=$(echo ${ids} | wc -w)
+ touch "${ID_COUNT_DIR}/${count}"
+ last_id=${ids% }
+ last_id=${last_id##* }
+ touch "${ID_MAX_DIR}/${last_id}"
+}
+
+function rbd_create_image() {
+ [ $# -eq 0 ] || exit 99
+ local image=$(basename $(mktemp "${NAMES_DIR}/image.XXXXXX"))
+
+ rbd create "${image}" --size=1024
+ echo "${image}"
+}
+
+function rbd_image_id() {
+ [ $# -eq 1 ] || exit 99
+ local image="$1"
+
+ grep -l "${image}" /sys/bus/rbd/devices/*/name 2>/dev/null |
+ cut -d / -f 6
+}
+
+function rbd_map_image() {
+ [ $# -eq 1 ] || exit 99
+ local image="$1"
+ local id
+
+ sudo rbd map "${image}" --user "${CEPH_ID}" ${SECRET_ARGS} \
+ > /dev/null 2>&1
+
+ id=$(rbd_image_id "${image}")
+ echo "${id}"
+}
+
+function rbd_write_image() {
+ [ $# -eq 1 ] || exit 99
+ local id="$1"
+
+ # Offset and size here are meant to ensure beginning and end
+ # cross both (4K or 64K) page and (4MB) rbd object boundaries.
+ # It assumes the SOURCE_DATA file has size 66 * 2048 bytes
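+	# (2015 * 2048 = 4126720, just under the 4 MiB object boundary at
+	# 4194304; the 66 * 2048 = 135168-byte write ends at 4261888 and
+	# so crosses into the next object)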
+ dd if="${SOURCE_DATA}" of="/dev/rbd${id}" bs=2048 seek=2015 \
+ > /dev/null 2>&1
+}
+
+# All starting and ending offsets here are selected so they are not
+# aligned on a (4 KB or 64 KB) page boundary
+function rbd_read_image() {
+ [ $# -eq 1 ] || exit 99
+ local id="$1"
+
+	# First read starting and ending at an offset before any
+	# written data. The osd zero-fills a read from an existing
+	# rbd object when the range lies entirely before any
+	# previously-written data.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=3 \
+ > /dev/null 2>&1
+ # Next read starting at an offset before any written data,
+ # but ending at an offset that includes data that's been
+ # written. The osd zero-fills unwritten data at the
+ # beginning of a read.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=1983 \
+ > /dev/null 2>&1
+ # Read the data at offset 2015 * 2048 bytes (where it was
+ # written) and make sure it matches the original data.
+ cmp --quiet "${SOURCE_DATA}" "/dev/rbd${id}" 0 4126720 ||
+ echo "MISMATCH!!!"
+ # Now read starting within the pre-written data, but ending
+ # beyond it. The rbd client zero-fills the unwritten
+ # portion at the end of a read.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2079 \
+ > /dev/null 2>&1
+ # Now read starting from an unwritten range within a written
+ # rbd object. The rbd client zero-fills this.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2115 \
+ > /dev/null 2>&1
+	# Finally read from an unwritten region which would reside
+	# in a different (non-existent) osd object. The rbd client
+	# zero-fills unwritten data when the target object doesn't
+	# exist.
+ dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=4098 \
+ > /dev/null 2>&1
+}
+
+function rbd_unmap_image() {
+ [ $# -eq 1 ] || exit 99
+ local id="$1"
+
+ sudo rbd unmap "/dev/rbd${id}"
+}
+
+function rbd_destroy_image() {
+ [ $# -eq 1 ] || exit 99
+ local image="$1"
+
+ # Don't wait for it to complete, to increase concurrency
+ rbd rm "${image}" >/dev/null 2>&1 &
+ rm -f "${NAMES_DIR}/${image}"
+}
+
+function one_pass() {
+ [ $# -eq 0 ] || exit 99
+ local image
+ local id
+ local ids
+ local i
+
+ image=$(rbd_create_image)
+ id=$(rbd_map_image "${image}")
+ ids=$(rbd_ids)
+ update_maxes "${ids}"
+	for i in ${ids}; do
+ if [ "${i}" -eq "${id}" ]; then
+ rbd_write_image "${i}"
+ else
+ rbd_read_image "${i}"
+ fi
+ done
+ rbd_unmap_image "${id}"
+ rbd_destroy_image "${image}"
+}
+
+################################################################
+
+parseargs "$@"
+
+setup
+
+for iter in $(seq 1 "${ITER}"); do
+ for count in $(seq 1 "${COUNT}"); do
+ one_pass &
+ done
+ # Sleep longer at first, overlap iterations more later.
+ # Use awk to get sub-second granularity (see sleep(1)).
+ sleep $(echo "${DELAY}" "${iter}" "${ITER}" |
+ awk '{ printf("%.2f\n", $1 - $1 * $2 / $3);}')
+
+done
+wait
+
+cleanup
+
+exit 0
diff --git a/qa/workunits/rbd/crimson/test_crimson_librbd.sh b/qa/workunits/rbd/crimson/test_crimson_librbd.sh
new file mode 100755
index 000000000..fb308de41
--- /dev/null
+++ b/qa/workunits/rbd/crimson/test_crimson_librbd.sh
@@ -0,0 +1,35 @@
+#!/bin/sh -e
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --error-exitcode=1 ceph_test_librbd
+else
+  # Run test cases individually to allow finer-grained selection
+  # during ongoing Crimson development.
+ # Disabled test groups are tracked here:
+ # https://tracker.ceph.com/issues/58791
+ ceph_test_librbd --gtest_filter='TestLibRBD.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/0.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/1.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/2.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/3.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/4.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/5.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/6.*'
+ ceph_test_librbd --gtest_filter='EncryptedFlattenTest/7.*'
+ # ceph_test_librbd --gtest_filter='DiffIterateTest/0.*'
+ # ceph_test_librbd --gtest_filter='DiffIterateTest/1.*'
+ ceph_test_librbd --gtest_filter='TestImageWatcher.*'
+ ceph_test_librbd --gtest_filter='TestInternal.*'
+ ceph_test_librbd --gtest_filter='TestMirroring.*'
+ # ceph_test_librbd --gtest_filter='TestDeepCopy.*'
+ ceph_test_librbd --gtest_filter='TestGroup.*'
+ # ceph_test_librbd --gtest_filter='TestMigration.*'
+ ceph_test_librbd --gtest_filter='TestMirroringWatcher.*'
+ ceph_test_librbd --gtest_filter='TestObjectMap.*'
+ ceph_test_librbd --gtest_filter='TestOperations.*'
+ ceph_test_librbd --gtest_filter='TestTrash.*'
+ ceph_test_librbd --gtest_filter='TestJournalEntries.*'
+ ceph_test_librbd --gtest_filter='TestJournalReplay.*'
+fi
+exit 0
diff --git a/qa/workunits/rbd/diff.sh b/qa/workunits/rbd/diff.sh
new file mode 100755
index 000000000..fbd6e0642
--- /dev/null
+++ b/qa/workunits/rbd/diff.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+set -ex
+
+function cleanup() {
+ rbd snap purge foo || :
+ rbd rm foo || :
+ rbd snap purge foo.copy || :
+ rbd rm foo.copy || :
+ rbd snap purge foo.copy2 || :
+ rbd rm foo.copy2 || :
+ rm -f foo.diff foo.out
+}
+
+cleanup
+
+rbd create foo --size 1000
+rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
+
+#rbd cp foo foo.copy
+rbd create foo.copy --size 1000
+rbd export-diff foo - | rbd import-diff - foo.copy
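+# (the pipe above amounts to a full copy: with no --from-snap,
+# export-diff covers the image from the beginning)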
+
+rbd snap create foo --snap=two
+rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand
+rbd snap create foo --snap=three
+rbd snap create foo.copy --snap=two
+
+rbd export-diff foo@two --from-snap three foo.diff && exit 1 || true # wrong snap order
+rm -f foo.diff
+
+rbd export-diff foo@three --from-snap two foo.diff
+rbd import-diff foo.diff foo.copy
+rbd import-diff foo.diff foo.copy && exit 1 || true # this should fail with EEXIST on the end snap
+rbd snap ls foo.copy | grep three
+
+rbd create foo.copy2 --size 1000
+rbd import-diff foo.diff foo.copy2 && exit 1 || true # this should fail bc the start snap dne
+
+rbd export foo foo.out
+orig=`md5sum foo.out | awk '{print $1}'`
+rm foo.out
+rbd export foo.copy foo.out
+copy=`md5sum foo.out | awk '{print $1}'`
+
+if [ "$orig" != "$copy" ]; then
+ echo does not match
+ exit 1
+fi
+
+cleanup
+
+echo OK
+
diff --git a/qa/workunits/rbd/diff_continuous.sh b/qa/workunits/rbd/diff_continuous.sh
new file mode 100755
index 000000000..fd1785e07
--- /dev/null
+++ b/qa/workunits/rbd/diff_continuous.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+
+set -ex
+set -o pipefail
+
+function untar_workload() {
+ local i
+ for ((i = 0; i < 10; i++)); do
+ pv -L 10M linux-5.4.tar.gz > "${MOUNT}/linux-5.4.tar.gz"
+ tar -C "${MOUNT}" -xzf "${MOUNT}/linux-5.4.tar.gz"
+ sync "${MOUNT}"
+ rm -rf "${MOUNT}"/linux-5.4*
+ done
+}
+
+function check_object_map() {
+ local spec="$1"
+
+ rbd object-map check "${spec}"
+
+ local flags
+ flags="$(rbd info "${spec}" | grep 'flags: ')"
+ if [[ "${flags}" =~ object\ map\ invalid ]]; then
+ echo "Object map invalid at ${spec}"
+ exit 1
+ fi
+ if [[ "${flags}" =~ fast\ diff\ invalid ]]; then
+ echo "Fast diff invalid at ${spec}"
+ exit 1
+ fi
+}
+
+# RBD_DEVICE_TYPE is intended to be set from yaml, default to krbd
+readonly DEVICE_TYPE="${RBD_DEVICE_TYPE:-krbd}"
+
+BASE_UUID="$(uuidgen)"
+readonly BASE_UUID
+
+readonly SIZE="2G"
+readonly SRC="${BASE_UUID}-src"
+readonly DST="${BASE_UUID}-dst"
+readonly MOUNT="${BASE_UUID}-mnt"
+
+rbd create -s "${SIZE}" --stripe-unit 64K --stripe-count 8 \
+ --image-feature exclusive-lock,object-map,fast-diff "${SRC}"
+rbd create -s "${SIZE}" --object-size 512K "${DST}"
+
+dev="$(sudo rbd device map -t "${DEVICE_TYPE}" "${SRC}")"
+sudo mkfs.ext4 "${dev}"
+mkdir "${MOUNT}"
+sudo mount "${dev}" "${MOUNT}"
+sudo chown "$(whoami)" "${MOUNT}"
+
+# start untar in the background
+wget https://download.ceph.com/qa/linux-5.4.tar.gz
+untar_workload &
+untar_pid=$!
+
+# export initial incremental
+snap_num=1
+rbd snap create "${SRC}@snap${snap_num}"
+rbd export-diff "${SRC}@snap${snap_num}" "${BASE_UUID}@snap${snap_num}.diff"
+
+# keep exporting successive incrementals while untar is running
+while kill -0 "${untar_pid}"; do
+ snap_num=$((snap_num + 1))
+ rbd snap create "${SRC}@snap${snap_num}"
+ sleep $((RANDOM % 4 + 1))
+ rbd export-diff --whole-object --from-snap "snap$((snap_num - 1))" \
+ "${SRC}@snap${snap_num}" "${BASE_UUID}@snap${snap_num}.diff"
+done
+
+sudo umount "${MOUNT}"
+sudo rbd device unmap -t "${DEVICE_TYPE}" "${dev}"
+
+if ! wait "${untar_pid}"; then
+ echo "untar_workload failed"
+ exit 1
+fi
+
+echo "Exported ${snap_num} incrementals"
+if ((snap_num < 30)); then
+ echo "Too few incrementals"
+ exit 1
+fi
+
+# validate
+for ((i = 1; i <= snap_num; i++)); do
+ rbd import-diff "${BASE_UUID}@snap${i}.diff" "${DST}"
+ src_sum="$(rbd export "${SRC}@snap${i}" - | md5sum | awk '{print $1}')"
+ dst_sum="$(rbd export "${DST}@snap${i}" - | md5sum | awk '{print $1}')"
+ if [[ "${src_sum}" != "${dst_sum}" ]]; then
+ echo "Mismatch at snap${i}: ${src_sum} != ${dst_sum}"
+ exit 1
+ fi
+ check_object_map "${SRC}@snap${i}"
+ # FIXME: this reproduces http://tracker.ceph.com/issues/37876
+ # there is no fstrim involved but "rbd import-diff" can produce
+ # write-zeroes requests which turn into discards under the hood
+ # actual: EXISTS, expected: EXISTS_CLEAN inconsistency is harmless
+ # from a data integrity POV and data is validated above regardless,
+ # so just waive it for now
+ #check_object_map "${DST}@snap${i}"
+done
+
+echo OK
diff --git a/qa/workunits/rbd/huge-tickets.sh b/qa/workunits/rbd/huge-tickets.sh
new file mode 100755
index 000000000..22853c07a
--- /dev/null
+++ b/qa/workunits/rbd/huge-tickets.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# This is a test for http://tracker.ceph.com/issues/8979 and the fallout
+# from triaging it. #8979 itself was random crashes on corrupted memory
+# due to a buffer overflow (for tickets larger than 256 bytes), further
+# inspection showed that vmalloced tickets weren't handled correctly as
+# well.
+#
+# What we are doing here is generating three huge keyrings and feeding
+# them to libceph (through 'rbd map' on a scratch image). Bad kernels
+# will crash reliably either on corrupted memory somewhere or a bad page
+# fault in scatterwalk_pagedone().
+
+set -ex
+
+function generate_keyring() {
+ local user=$1
+ local n=$2
+
+ ceph-authtool -C -n client.$user --cap mon 'allow *' --gen-key /tmp/keyring-$user
+
+ set +x # don't pollute trace with echos
+ echo -en "\tcaps osd = \"allow rwx pool=rbd" >>/tmp/keyring-$user
+ for i in $(seq 1 $n); do
+ echo -n ", allow rwx pool=pool$i" >>/tmp/keyring-$user
+ done
+ echo "\"" >>/tmp/keyring-$user
+ set -x
+}
+
+generate_keyring foo 1000 # ~25K, kmalloc
+generate_keyring bar 20000 # ~500K, vmalloc
+generate_keyring baz 300000 # ~8M, vmalloc + sg chaining
+
+rbd create --size 1 test
+
+for user in {foo,bar,baz}; do
+ ceph auth import -i /tmp/keyring-$user
+ DEV=$(sudo rbd map -n client.$user --keyring /tmp/keyring-$user test)
+ sudo rbd unmap $DEV
+done
diff --git a/qa/workunits/rbd/image_read.sh b/qa/workunits/rbd/image_read.sh
new file mode 100755
index 000000000..ddca8356e
--- /dev/null
+++ b/qa/workunits/rbd/image_read.sh
@@ -0,0 +1,680 @@
+#!/usr/bin/env bash
+
+# Copyright (C) 2013 Inktank Storage, Inc.
+#
+# This is free software; see the source for copying conditions.
+# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE.
+#
+# This is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as
+# published by the Free Software Foundation version 2.
+
+# Alex Elder <elder@inktank.com>
+# April 10, 2013
+
+################################################################
+
+# The purpose of this test is to validate that data read from a
+# mapped rbd image is what it's expected to be.
+#
+# By default it creates an image and fills it with some data. It
+# then reads back the data at a series of offsets known to cover
+# various situations (such as reading the beginning, end, or the
+# entirety of an object, or doing a read that spans multiple
+# objects), and stashes the results in a set of local files.
+#
+# It also creates and maps a snapshot of the original image after
+# it's been filled, and reads back the same ranges of data from the
+# snapshot. It then compares the data read back with what was read
+# back from the original image, verifying they match.
+#
+# Clone functionality is tested as well, in which case a clone is
+# made of the snapshot, and the same ranges of data are again read
+# and compared with the original. In addition, a snapshot of that
+# clone is created, and a clone of *that* snapshot is put through
+# the same set of tests. (Clone testing can be optionally skipped.)
+
+################################################################
+
+# Default parameter values. Environment variables, if set, will
+# supersede these defaults. Such variables have names that begin
+# with "IMAGE_READ_", e.g. use IMAGE_READ_PAGE_SIZE=65536
+# to use 65536 as the page size.
+set -e
+
+DEFAULT_VERBOSE=true
+DEFAULT_TEST_CLONES=true
+DEFAULT_LOCAL_FILES=false
+DEFAULT_FORMAT=2
+DEFAULT_DOUBLE_ORDER=true
+DEFAULT_HALF_ORDER=false
+DEFAULT_PAGE_SIZE=4096
+DEFAULT_OBJECT_ORDER=22
+MIN_OBJECT_ORDER=12 # technically 9, but the rbd CLI enforces 12
+MAX_OBJECT_ORDER=32
+
+export RBD_FORCE_ALLOW_V1=1
+
+PROGNAME=$(basename $0)
+
+ORIGINAL=original-$$
+SNAP1=snap1-$$
+CLONE1=clone1-$$
+SNAP2=snap2-$$
+CLONE2=clone2-$$
+
+function err() {
+ if [ $# -gt 0 ]; then
+ echo "${PROGNAME}: $@" >&2
+ fi
+ exit 2
+}
+
+function usage() {
+ if [ $# -gt 0 ]; then
+ echo "" >&2
+ echo "${PROGNAME}: $@" >&2
+ fi
+ echo "" >&2
+ echo "Usage: ${PROGNAME} [<options>]" >&2
+ echo "" >&2
+ echo "options are:" >&2
+ echo " -o object_order" >&2
+ echo " must be ${MIN_OBJECT_ORDER}..${MAX_OBJECT_ORDER}" >&2
+ echo " -p page_size (in bytes)" >&2
+ echo " note: there must be at least 4 pages per object" >&2
+ echo " -1" >&2
+ echo " test using format 1 rbd images (default)" >&2
+ echo " -2" >&2
+ echo " test using format 2 rbd images" >&2
+ echo " -c" >&2
+ echo " also test rbd clone images (implies format 2)" >&2
+ echo " -d" >&2
+ echo " clone object order double its parent's (format 2)" >&2
+ echo " -h" >&2
+ echo " clone object order half of its parent's (format 2)" >&2
+ echo " -l" >&2
+ echo " use local files rather than rbd images" >&2
+ echo " -v" >&2
+ echo " disable reporting of what's going on" >&2
+ echo "" >&2
+ exit 1
+}
+
+function verbose() {
+ [ "${VERBOSE}" = true ] && echo "$@"
+ true # Don't let the verbose test spoil our return value
+}
+
+function quiet() {
+ "$@" 2> /dev/null
+}
+
+function boolean_toggle() {
+ [ $# -eq 1 ] || exit 99
+ test "$1" = "true" && echo false || echo true
+}
+
+function parseargs() {
+ local opts="o:p:12clv"
+ local lopts="order:,page_size:,local,clone,verbose"
+ local parsed
+ local clone_order_msg
+
+ # use values from environment if available
+ VERBOSE="${IMAGE_READ_VERBOSE:-${DEFAULT_VERBOSE}}"
+ TEST_CLONES="${IMAGE_READ_TEST_CLONES:-${DEFAULT_TEST_CLONES}}"
+ LOCAL_FILES="${IMAGE_READ_LOCAL_FILES:-${DEFAULT_LOCAL_FILES}}"
+ DOUBLE_ORDER="${IMAGE_READ_DOUBLE_ORDER:-${DEFAULT_DOUBLE_ORDER}}"
+ HALF_ORDER="${IMAGE_READ_HALF_ORDER:-${DEFAULT_HALF_ORDER}}"
+ FORMAT="${IMAGE_READ_FORMAT:-${DEFAULT_FORMAT}}"
+ PAGE_SIZE="${IMAGE_READ_PAGE_SIZE:-${DEFAULT_PAGE_SIZE}}"
+ OBJECT_ORDER="${IMAGE_READ_OBJECT_ORDER:-${DEFAULT_OBJECT_ORDER}}"
+
+ parsed=$(getopt -o "${opts}" -l "${lopts}" -n "${PROGNAME}" -- "$@") ||
+ usage
+ eval set -- "${parsed}"
+ while true; do
+ case "$1" in
+ -v|--verbose)
+ VERBOSE=$(boolean_toggle "${VERBOSE}");;
+ -c|--clone)
+ TEST_CLONES=$(boolean_toggle "${TEST_CLONES}");;
+ -d|--double)
+ DOUBLE_ORDER=$(boolean_toggle "${DOUBLE_ORDER}");;
+ -h|--half)
+ HALF_ORDER=$(boolean_toggle "${HALF_ORDER}");;
+ -l|--local)
+ LOCAL_FILES=$(boolean_toggle "${LOCAL_FILES}");;
+ -1|-2)
+ FORMAT="${1:1}";;
+ -p|--page_size)
+ PAGE_SIZE="$2"; shift;;
+ -o|--order)
+ OBJECT_ORDER="$2"; shift;;
+ --)
+ shift; break;;
+ *)
+ err "getopt internal error"
+ esac
+ shift
+ done
+ [ $# -gt 0 ] && usage "excess arguments ($*)"
+
+ if [ "${TEST_CLONES}" = true ]; then
+ # If we're using different object orders for clones,
+ # make sure the limits are updated accordingly. If
+ # both "half" and "double" are specified, just
+ # ignore them both.
+ if [ "${DOUBLE_ORDER}" = true ]; then
+ if [ "${HALF_ORDER}" = true ]; then
+ DOUBLE_ORDER=false
+ HALF_ORDER=false
+ else
+ ((MAX_OBJECT_ORDER -= 2))
+ fi
+ elif [ "${HALF_ORDER}" = true ]; then
+ ((MIN_OBJECT_ORDER += 2))
+ fi
+ fi
+
+ [ "${OBJECT_ORDER}" -lt "${MIN_OBJECT_ORDER}" ] &&
+ usage "object order (${OBJECT_ORDER}) must be" \
+ "at least ${MIN_OBJECT_ORDER}"
+ [ "${OBJECT_ORDER}" -gt "${MAX_OBJECT_ORDER}" ] &&
+ usage "object order (${OBJECT_ORDER}) must be" \
+ "at most ${MAX_OBJECT_ORDER}"
+
+ if [ "${TEST_CLONES}" = true ]; then
+ if [ "${DOUBLE_ORDER}" = true ]; then
+ ((CLONE1_ORDER = OBJECT_ORDER + 1))
+ ((CLONE2_ORDER = OBJECT_ORDER + 2))
+ clone_order_msg="double"
+ elif [ "${HALF_ORDER}" = true ]; then
+ ((CLONE1_ORDER = OBJECT_ORDER - 1))
+ ((CLONE2_ORDER = OBJECT_ORDER - 2))
+ clone_order_msg="half of"
+ else
+ CLONE1_ORDER="${OBJECT_ORDER}"
+ CLONE2_ORDER="${OBJECT_ORDER}"
+ clone_order_msg="the same as"
+ fi
+ fi
+
+ [ "${TEST_CLONES}" != true ] || FORMAT=2
+
+ OBJECT_SIZE=$(echo "2 ^ ${OBJECT_ORDER}" | bc)
+ OBJECT_PAGES=$(echo "${OBJECT_SIZE} / ${PAGE_SIZE}" | bc)
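+	# IMAGE_SIZE (in MB) leaves room for 32 objects: 16 filled by
+	# fill_original plus 16 more covered by the "extended" read pass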
+ IMAGE_SIZE=$((2 * 16 * OBJECT_SIZE / (1024 * 1024)))
+ [ "${IMAGE_SIZE}" -lt 1 ] && IMAGE_SIZE=1
+ IMAGE_OBJECTS=$((IMAGE_SIZE * (1024 * 1024) / OBJECT_SIZE))
+
+ [ "${OBJECT_PAGES}" -lt 4 ] &&
+ usage "object size (${OBJECT_SIZE}) must be" \
+ "at least 4 * page size (${PAGE_SIZE})"
+
+ echo "parameters for this run:"
+ echo " format ${FORMAT} images will be tested"
+ echo " object order is ${OBJECT_ORDER}, so" \
+ "objects are ${OBJECT_SIZE} bytes"
+ echo " page size is ${PAGE_SIZE} bytes, so" \
+ "there are are ${OBJECT_PAGES} pages in an object"
+ echo " derived image size is ${IMAGE_SIZE} MB, so" \
+ "there are ${IMAGE_OBJECTS} objects in an image"
+ if [ "${TEST_CLONES}" = true ]; then
+ echo " clone functionality will be tested"
+ echo " object size for a clone will be ${clone_order_msg}"
+ echo " the object size of its parent image"
+ fi
+
+ true # Don't let the clones test spoil our return value
+}
+
+function image_dev_path() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ echo "${TEMP}/${image_name}"
+ return
+ fi
+
+ echo "/dev/rbd/rbd/${image_name}"
+}
+
+function out_data_dir() {
+ [ $# -lt 2 ] || exit 99
+ local out_data="${TEMP}/data"
+ local image_name
+
+ if [ $# -eq 1 ]; then
+ image_name="$1"
+ echo "${out_data}/${image_name}"
+ else
+ echo "${out_data}"
+ fi
+}
+
+function setup() {
+ verbose "===== setting up ====="
+ TEMP=$(mktemp -d /tmp/rbd_image_read.XXXXX)
+ mkdir -p $(out_data_dir)
+
+ # create and fill the original image with some data
+ create_image "${ORIGINAL}"
+ map_image "${ORIGINAL}"
+ fill_original
+
+ # create a snapshot of the original
+ create_image_snap "${ORIGINAL}" "${SNAP1}"
+ map_image_snap "${ORIGINAL}" "${SNAP1}"
+
+ if [ "${TEST_CLONES}" = true ]; then
+ # create a clone of the original snapshot
+ create_snap_clone "${ORIGINAL}" "${SNAP1}" \
+ "${CLONE1}" "${CLONE1_ORDER}"
+ map_image "${CLONE1}"
+
+ # create a snapshot of that clone
+ create_image_snap "${CLONE1}" "${SNAP2}"
+ map_image_snap "${CLONE1}" "${SNAP2}"
+
+ # create a clone of that clone's snapshot
+ create_snap_clone "${CLONE1}" "${SNAP2}" \
+ "${CLONE2}" "${CLONE2_ORDER}"
+ map_image "${CLONE2}"
+ fi
+}
+
+function teardown() {
+ verbose "===== cleaning up ====="
+ if [ "${TEST_CLONES}" = true ]; then
+ unmap_image "${CLONE2}" || true
+ destroy_snap_clone "${CLONE1}" "${SNAP2}" "${CLONE2}" || true
+
+ unmap_image_snap "${CLONE1}" "${SNAP2}" || true
+ destroy_image_snap "${CLONE1}" "${SNAP2}" || true
+
+ unmap_image "${CLONE1}" || true
+ destroy_snap_clone "${ORIGINAL}" "${SNAP1}" "${CLONE1}" || true
+ fi
+ unmap_image_snap "${ORIGINAL}" "${SNAP1}" || true
+ destroy_image_snap "${ORIGINAL}" "${SNAP1}" || true
+ unmap_image "${ORIGINAL}" || true
+ destroy_image "${ORIGINAL}" || true
+
+ rm -rf $(out_data_dir)
+ rmdir "${TEMP}"
+}
+
+function create_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local image_path
+ local bytes
+
+ verbose "creating image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ image_path=$(image_dev_path "${image_name}")
+ bytes=$(echo "${IMAGE_SIZE} * 1024 * 1024 - 1" | bc)
+ quiet dd if=/dev/zero bs=1 count=1 seek="${bytes}" \
+ of="${image_path}"
+ return
+ fi
+
+ rbd create "${image_name}" --image-format "${FORMAT}" \
+ --size "${IMAGE_SIZE}" --order "${OBJECT_ORDER}" \
+ --image-shared
+}
+
+function destroy_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local image_path
+
+ verbose "destroying image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ image_path=$(image_dev_path "${image_name}")
+ rm -f "${image_path}"
+ return
+ fi
+
+ rbd rm "${image_name}"
+}
+
+function map_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1" # can be image@snap too
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+
+ sudo rbd map "${image_name}"
+}
+
+function unmap_image() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1" # can be image@snap too
+ local image_path
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+ image_path=$(image_dev_path "${image_name}")
+
+ if [ -e "${image_path}" ]; then
+ sudo rbd unmap "${image_path}"
+ fi
+}
+
+function map_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+
+ image_snap="${image_name}@${snap_name}"
+ map_image "${image_snap}"
+}
+
+function unmap_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap
+
+ if [ "${LOCAL_FILES}" = true ]; then
+ return
+ fi
+
+ image_snap="${image_name}@${snap_name}"
+ unmap_image "${image_snap}"
+}
+
+function create_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap="${image_name}@${snap_name}"
+ local image_path
+ local snap_path
+
+ verbose "creating snapshot \"${snap_name}\"" \
+ "of image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ image_path=$(image_dev_path "${image_name}")
+ snap_path=$(image_dev_path "${image_snap}")
+
+ cp "${image_path}" "${snap_path}"
+ return
+ fi
+
+ rbd snap create "${image_snap}"
+}
+
+function destroy_image_snap() {
+ [ $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local image_snap="${image_name}@${snap_name}"
+ local snap_path
+
+ verbose "destroying snapshot \"${snap_name}\"" \
+ "of image \"${image_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ snap_path=$(image_dev_path "${image_snap}")
+ rm -rf "${snap_path}"
+ return
+ fi
+
+ rbd snap rm "${image_snap}"
+}
+
+function create_snap_clone() {
+ [ $# -eq 4 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local clone_name="$3"
+ local clone_order="$4"
+ local image_snap="${image_name}@${snap_name}"
+ local snap_path
+ local clone_path
+
+ verbose "creating clone image \"${clone_name}\"" \
+ "of image snapshot \"${image_name}@${snap_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ snap_path=$(image_dev_path "${image_name}@${snap_name}")
+ clone_path=$(image_dev_path "${clone_name}")
+
+ cp "${snap_path}" "${clone_path}"
+ return
+ fi
+
+ rbd snap protect "${image_snap}"
+ rbd clone --order "${clone_order}" --image-shared \
+ "${image_snap}" "${clone_name}"
+}
+
+function destroy_snap_clone() {
+ [ $# -eq 3 ] || exit 99
+ local image_name="$1"
+ local snap_name="$2"
+ local clone_name="$3"
+ local image_snap="${image_name}@${snap_name}"
+ local clone_path
+
+ verbose "destroying clone image \"${clone_name}\""
+ if [ "${LOCAL_FILES}" = true ]; then
+ clone_path=$(image_dev_path "${clone_name}")
+
+ rm -rf "${clone_path}"
+ return
+ fi
+
+ rbd rm "${clone_name}"
+ rbd snap unprotect "${image_snap}"
+}
+
+# function that produces "random" data with which to fill the image
+function source_data() {
+ while quiet dd if=/bin/bash skip=$(($$ % 199)) bs="${PAGE_SIZE}"; do
+ : # Just do the dd
+ done
+}
+
+function fill_original() {
+ local image_path=$(image_dev_path "${ORIGINAL}")
+
+ verbose "filling original image"
+ # Fill 16 objects worth of "random" data
+ source_data |
+ quiet dd bs="${PAGE_SIZE}" count=$((16 * OBJECT_PAGES)) \
+ of="${image_path}"
+}
+
+function do_read() {
+ [ $# -eq 3 -o $# -eq 4 ] || exit 99
+ local image_name="$1"
+ local offset="$2"
+ local length="$3"
+ [ "${length}" -gt 0 ] || err "do_read: length must be non-zero"
+ local image_path=$(image_dev_path "${image_name}")
+ local out_data=$(out_data_dir "${image_name}")
+ local range=$(printf "%06u~%04u" "${offset}" "${length}")
+ local out_file
+
+ [ $# -eq 4 ] && offset=$((offset + 16 * OBJECT_PAGES))
+
+ verbose "reading \"${image_name}\" pages ${range}"
+
+ out_file="${out_data}/pages_${range}"
+
+ quiet dd bs="${PAGE_SIZE}" skip="${offset}" count="${length}" \
+ if="${image_path}" of="${out_file}"
+}
+
+function one_pass() {
+ [ $# -eq 1 -o $# -eq 2 ] || exit 99
+ local image_name="$1"
+ local extended
+ [ $# -eq 2 ] && extended="true"
+ local offset
+ local length
+
+ offset=0
+
+ # +-----------+-----------+---
+ # |X:X:X...X:X| : : ... : | :
+ # +-----------+-----------+---
+ length="${OBJECT_PAGES}"
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : |X: : ... : | :
+ # ---+-----------+---
+ length=1
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : | :X: ... : | :
+ # ---+-----------+---
+ length=1
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : | : :X...X: | :
+ # ---+-----------+---
+ length=$((OBJECT_PAGES - 3))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : | : : ... :X| :
+ # ---+-----------+---
+ length=1
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+---
+ # : |X:X:X...X:X| :
+ # ---+-----------+---
+ length="${OBJECT_PAGES}"
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ offset=$((offset + 1)) # skip 1
+
+ # ---+-----------+---
+ # : | :X:X...X:X| :
+ # ---+-----------+---
+ length=$((OBJECT_PAGES - 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : |X:X:X...X:X|X: : ... : | :
+ # ---+-----------+-----------+---
+ length=$((OBJECT_PAGES + 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : | :X:X...X:X|X: : ... : | :
+ # ---+-----------+-----------+---
+ length="${OBJECT_PAGES}"
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : | :X:X...X:X|X:X: ... : | :
+ # ---+-----------+-----------+---
+ length=$((OBJECT_PAGES + 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # ---+-----------+-----------+---
+ # : | : :X...X:X|X:X:X...X:X| :
+ # ---+-----------+-----------+---
+ length=$((2 * OBJECT_PAGES + 2))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ offset=$((offset + 1)) # skip 1
+
+ # ---+-----------+-----------+-----
+ # : | :X:X...X:X|X:X:X...X:X|X: :
+ # ---+-----------+-----------+-----
+ length=$((2 * OBJECT_PAGES))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ offset=$((offset + length))
+
+ # --+-----------+-----------+--------
+ # : | :X:X...X:X|X:X:X...X:X|X:X: :
+ # --+-----------+-----------+--------
+	length=$((2 * OBJECT_PAGES + 1))
+ do_read "${image_name}" "${offset}" "${length}" ${extended}
+ # offset=$((offset + length))
+}
+
+function run_using() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local out_data=$(out_data_dir "${image_name}")
+
+ verbose "===== running using \"${image_name}\" ====="
+ mkdir -p "${out_data}"
+ one_pass "${image_name}"
+ one_pass "${image_name}" extended
+}
+
+function compare() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+ local out_data=$(out_data_dir "${image_name}")
+ local original=$(out_data_dir "${ORIGINAL}")
+
+ verbose "===== comparing \"${image_name}\" ====="
+ for i in $(ls "${original}"); do
+ verbose compare "\"${image_name}\" \"${i}\""
+ cmp "${original}/${i}" "${out_data}/${i}"
+ done
+ [ "${image_name}" = "${ORIGINAL}" ] || rm -rf "${out_data}"
+}
+
+function doit() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+
+ run_using "${image_name}"
+ compare "${image_name}"
+}
+
+########## Start
+
+parseargs "$@"
+
+trap teardown EXIT HUP INT
+setup
+
+run_using "${ORIGINAL}"
+doit "${ORIGINAL}@${SNAP1}"
+if [ "${TEST_CLONES}" = true ]; then
+ doit "${CLONE1}"
+ doit "${CLONE1}@${SNAP2}"
+ doit "${CLONE2}"
+fi
+rm -rf $(out_data_dir "${ORIGINAL}")
+
+echo "Success!"
+
+exit 0
diff --git a/qa/workunits/rbd/import_export.sh b/qa/workunits/rbd/import_export.sh
new file mode 100755
index 000000000..89e8d35cf
--- /dev/null
+++ b/qa/workunits/rbd/import_export.sh
@@ -0,0 +1,259 @@
+#!/bin/sh -ex
+
+# V1 image unsupported but required for testing purposes
+export RBD_FORCE_ALLOW_V1=1
+
+# returns data pool for a given image
+get_image_data_pool () {
+ image=$1
+ data_pool=$(rbd info $image | grep "data_pool: " | awk -F':' '{ print $NF }')
+    if [ -z "$data_pool" ]; then
+ data_pool='rbd'
+ fi
+
+ echo $data_pool
+}
+
+# return list of object numbers populated in image
+objects () {
+ image=$1
+ prefix=$(rbd info $image | grep block_name_prefix | awk '{print $NF;}')
+
+    # strip off the prefix and leading zeros from the object names, then
+    # sort; numeric sort is dubious for hex values, but it at least makes
+    # the list repeatable and comparable
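+    # (e.g. "<prefix>.000000000000000a" becomes "a"; the object name
+    # shown is illustrative)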
+ objects=$(rados ls -p $(get_image_data_pool $image) | grep $prefix | \
+ sed -e 's/'$prefix'\.//' -e 's/^0*\([0-9a-f]\)/\1/' | sort -u)
+ echo $objects
+}
+
+# return false if either files don't compare or their ondisk
+# sizes don't compare
+
+compare_files_and_ondisk_sizes () {
+ cmp -l $1 $2 || return 1
+ origsize=$(stat $1 --format %b)
+ exportsize=$(stat $2 --format %b)
+ difference=$(($exportsize - $origsize))
+ difference=${difference#-} # absolute value
+ test $difference -ge 0 -a $difference -lt 4096
+}
+
+TMPDIR=/tmp/rbd_import_export_$$
+rm -rf $TMPDIR
+mkdir $TMPDIR
+trap "rm -rf $TMPDIR" INT TERM EXIT
+
+# cannot import a dir
+mkdir foo.$$
+rbd import foo.$$ foo.dir && exit 1 || true # should fail
+rmdir foo.$$
+
+# create a sparse file
+dd if=/bin/sh of=${TMPDIR}/img bs=1k count=1 seek=10
+dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
+dd if=/bin/rm of=${TMPDIR}/img bs=1k count=100 seek=1000
+dd if=/bin/ls of=${TMPDIR}/img bs=1k seek=10000
+dd if=/bin/ln of=${TMPDIR}/img bs=1k seek=100000
+dd if=/bin/grep of=${TMPDIR}/img bs=1k seek=1000000
+
+rbd rm testimg || true
+
+rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
+rbd export testimg ${TMPDIR}/img2
+rbd export testimg - > ${TMPDIR}/img3
+rbd rm testimg
+cmp ${TMPDIR}/img ${TMPDIR}/img2
+cmp ${TMPDIR}/img ${TMPDIR}/img3
+rm ${TMPDIR}/img2 ${TMPDIR}/img3
+
+# try again, importing from stdin
+rbd import $RBD_CREATE_ARGS - testimg < ${TMPDIR}/img
+rbd export testimg ${TMPDIR}/img2
+rbd export testimg - > ${TMPDIR}/img3
+rbd rm testimg
+cmp ${TMPDIR}/img ${TMPDIR}/img2
+cmp ${TMPDIR}/img ${TMPDIR}/img3
+
+rm ${TMPDIR}/img ${TMPDIR}/img2 ${TMPDIR}/img3
+
+if rbd help export | grep -q export-format; then
+ # try with --export-format for snapshots
+ dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100
+ rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg
+ rbd snap create testimg@snap
+ rbd image-meta set testimg key1 value1
+ IMAGEMETA_BEFORE=`rbd image-meta list testimg`
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ rbd info testimg_import
+ rbd info testimg_import@snap
+ IMAGEMETA_AFTER=`rbd image-meta list testimg_import`
+ [ "$IMAGEMETA_BEFORE" = "$IMAGEMETA_AFTER" ]
+
+ # compare the contents between testimg and testimg_import
+ rbd export testimg_import ${TMPDIR}/img_import
+ compare_files_and_ondisk_sizes ${TMPDIR}/img ${TMPDIR}/img_import
+
+ rbd export testimg@snap ${TMPDIR}/img_snap
+ rbd export testimg_import@snap ${TMPDIR}/img_snap_import
+ compare_files_and_ondisk_sizes ${TMPDIR}/img_snap ${TMPDIR}/img_snap_import
+
+ rm ${TMPDIR}/img_v2
+ rm ${TMPDIR}/img_import
+ rm ${TMPDIR}/img_snap
+ rm ${TMPDIR}/img_snap_import
+
+ rbd snap rm testimg_import@snap
+ rbd remove testimg_import
+ rbd snap rm testimg@snap
+ rbd rm testimg
+
+ # order
+ rbd import --order 20 ${TMPDIR}/img testimg
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ rbd info testimg_import|grep order|awk '{print $2}'|grep 20
+
+ rm ${TMPDIR}/img_v2
+
+ rbd remove testimg_import
+ rbd remove testimg
+
+ # features
+ rbd import --image-feature layering ${TMPDIR}/img testimg
+ FEATURES_BEFORE=`rbd info testimg|grep features`
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ FEATURES_AFTER=`rbd info testimg_import|grep features`
+ if [ "$FEATURES_BEFORE" != "$FEATURES_AFTER" ]; then
+ false
+ fi
+
+ rm ${TMPDIR}/img_v2
+
+ rbd remove testimg_import
+ rbd remove testimg
+
+ # stripe
+ rbd import --stripe-count 1000 --stripe-unit 4096 ${TMPDIR}/img testimg
+ rbd export --export-format 2 testimg ${TMPDIR}/img_v2
+ rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import
+ rbd info testimg_import|grep "stripe unit"|grep -Ei '(4 KiB|4096)'
+ rbd info testimg_import|grep "stripe count"|awk '{print $3}'|grep 1000
+
+ rm ${TMPDIR}/img_v2
+
+ rbd remove testimg_import
+ rbd remove testimg
+
+ # snap protect
+ rbd import --image-format=2 ${TMPDIR}/img testimg
+ rbd snap create testimg@snap1
+ rbd snap create testimg@snap2
+ rbd snap protect testimg@snap2
+ rbd export --export-format 2 testimg ${TMPDIR}/snap_protect
+ rbd import --export-format 2 ${TMPDIR}/snap_protect testimg_import
+ rbd info testimg_import@snap1 | grep 'protected: False'
+ rbd info testimg_import@snap2 | grep 'protected: True'
+
+ rm ${TMPDIR}/snap_protect
+
+ rbd snap unprotect testimg@snap2
+ rbd snap unprotect testimg_import@snap2
+ rbd snap purge testimg
+ rbd snap purge testimg_import
+ rbd remove testimg
+ rbd remove testimg_import
+fi
+
+tiered=0
+if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then
+ tiered=1
+fi
+
+# create deliberately sparse files
+# 1 1M block of sparse, 1 1M block of random
+dd if=/dev/urandom bs=1M seek=1 count=1 of=${TMPDIR}/sparse1
+
+# 1 1M block of random, 1 1M block of sparse
+dd if=/dev/urandom bs=1M count=1 of=${TMPDIR}/sparse2; truncate ${TMPDIR}/sparse2 -s 2M
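+
+# purely informational: both files should show a 2M apparent size but only
+# ~1M of allocated blocks
+du -h --apparent-size ${TMPDIR}/sparse1 ${TMPDIR}/sparse2
+du -h ${TMPDIR}/sparse1 ${TMPDIR}/sparse2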
+
+# 1M-block images; validate resulting blocks
+
+# 1M sparse, 1M data
+rbd rm sparse1 || true
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1
+rbd ls -l | grep sparse1 | grep -Ei '(2 MiB|2048k)'
+[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
+
+# export, compare contents and on-disk size
+rbd export sparse1 ${TMPDIR}/sparse1.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
+rm ${TMPDIR}/sparse1.out
+rbd rm sparse1
+
+# 1M data, 1M sparse
+rbd rm sparse2 || true
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse2
+rbd ls -l | grep sparse2 | grep -Ei '(2 MiB|2048k)'
+[ $tiered -eq 1 -o "$(objects sparse2)" = '0' ]
+rbd export sparse2 ${TMPDIR}/sparse2.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
+rm ${TMPDIR}/sparse2.out
+rbd rm sparse2
+
+# extend sparse1 to 10 1M blocks, sparse at the end
+truncate ${TMPDIR}/sparse1 -s 10M
+# import from stdin just for fun, verify still sparse
+rbd import $RBD_CREATE_ARGS --order 20 - sparse1 < ${TMPDIR}/sparse1
+rbd ls -l | grep sparse1 | grep -Ei '(10 MiB|10240k)'
+[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ]
+rbd export sparse1 ${TMPDIR}/sparse1.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
+rm ${TMPDIR}/sparse1.out
+rbd rm sparse1
+
+# extend sparse2 to 4M total with two more nonsparse megs
+dd if=/dev/urandom bs=2M count=1 of=${TMPDIR}/sparse2 oflag=append conv=notrunc
+# again from stdin
+rbd import $RBD_CREATE_ARGS --order 20 - sparse2 < ${TMPDIR}/sparse2
+rbd ls -l | grep sparse2 | grep -Ei '(4 MiB|4096k)'
+[ $tiered -eq 1 -o "$(objects sparse2)" = '0 2 3' ]
+rbd export sparse2 ${TMPDIR}/sparse2.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
+rm ${TMPDIR}/sparse2.out
+rbd rm sparse2
+
+# zeros import to a sparse image. Note: importing an all-zeros file
+# currently doesn't work due to the way we handle 'empty' fiemaps;
+# the image ends up zero-filled.
+
+echo "partially-sparse file imports to partially-sparse image"
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1 sparse
+[ $tiered -eq 1 -o "$(objects sparse)" = '1' ]
+rbd rm sparse
+
+echo "zeros import through stdin to sparse image"
+# stdin
+dd if=/dev/zero bs=1M count=4 | rbd import $RBD_CREATE_ARGS - sparse
+[ $tiered -eq 1 -o "$(objects sparse)" = '' ]
+rbd rm sparse
+
+echo "zeros export to sparse file"
+# The image must be made "by hand"; import won't create an all-zero image
+rbd create $RBD_CREATE_ARGS sparse --size 4
+prefix=$(rbd info sparse | grep block_name_prefix | awk '{print $NF;}')
+# drop in 0 object directly
+dd if=/dev/zero bs=4M count=1 | rados -p $(get_image_data_pool sparse) \
+ put ${prefix}.000000000000 -
+[ $tiered -eq 1 -o "$(objects sparse)" = '0' ]
+# 1 object full of zeros; export should still create 0-disk-usage file
+rm ${TMPDIR}/sparse || true
+rbd export sparse ${TMPDIR}/sparse
+[ $(stat ${TMPDIR}/sparse --format=%b) = '0' ]
+rbd rm sparse
+
+rm ${TMPDIR}/sparse ${TMPDIR}/sparse1 ${TMPDIR}/sparse2 ${TMPDIR}/sparse3 || true
+
+echo OK
diff --git a/qa/workunits/rbd/issue-20295.sh b/qa/workunits/rbd/issue-20295.sh
new file mode 100755
index 000000000..3d617a066
--- /dev/null
+++ b/qa/workunits/rbd/issue-20295.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -ex
+
+TEST_POOL=ecpool
+TEST_IMAGE=test1
+PGS=12
+
+ceph osd pool create $TEST_POOL $PGS $PGS erasure
+ceph osd pool application enable $TEST_POOL rbd
+ceph osd pool set $TEST_POOL allow_ec_overwrites true
+rbd --data-pool $TEST_POOL create --size 1024G $TEST_IMAGE
+rbd bench \
+ --io-type write \
+ --io-size 4096 \
+ --io-pattern=rand \
+ --io-total 100M \
+ $TEST_IMAGE
+
+echo "OK"
diff --git a/qa/workunits/rbd/journal.sh b/qa/workunits/rbd/journal.sh
new file mode 100755
index 000000000..ba89e75c9
--- /dev/null
+++ b/qa/workunits/rbd/journal.sh
@@ -0,0 +1,326 @@
+#!/usr/bin/env bash
+set -e
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+function list_tests()
+{
+ echo "AVAILABLE TESTS"
+ for i in $TESTS; do
+ echo " $i"
+ done
+}
+
+function usage()
+{
+ echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...] [--no-cleanup]]"
+}
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function save_commit_position()
+{
+ local journal=$1
+
+ rados -p rbd getomapval journal.${journal} client_ \
+ $TMPDIR/${journal}.client_.omap
+}
+
+function restore_commit_position()
+{
+ local journal=$1
+
+ rados -p rbd setomapval journal.${journal} client_ \
+ < $TMPDIR/${journal}.client_.omap
+}
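+
+# For reference, the commit position lives in the journal header object's
+# omap under the "client_" key; when debugging it can be dumped manually,
+# e.g. with "rados -p rbd listomapvals journal.<id>".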
+
+test_rbd_journal()
+{
+ local image=testrbdjournal$$
+
+ rbd create --image-feature exclusive-lock --image-feature journaling \
+ --size 128 ${image}
+ local journal=$(rbd info ${image} --format=xml 2>/dev/null |
+ $XMLSTARLET sel -t -v "//image/journal")
+ test -n "${journal}"
+ rbd journal info ${journal}
+ rbd journal info --journal ${journal}
+ rbd journal info --image ${image}
+
+ rbd feature disable ${image} journaling
+
+ rbd info ${image} --format=xml 2>/dev/null |
+ expect_false $XMLSTARLET sel -t -v "//image/journal"
+ expect_false rbd journal info ${journal}
+ expect_false rbd journal info --image ${image}
+
+ rbd feature enable ${image} journaling
+
+ local journal1=$(rbd info ${image} --format=xml 2>/dev/null |
+ $XMLSTARLET sel -t -v "//image/journal")
+ test "${journal}" = "${journal1}"
+
+ rbd journal info ${journal}
+
+ rbd journal status ${journal}
+
+ local count=10
+ save_commit_position ${journal}
+ rbd bench --io-type write ${image} --io-size 4096 --io-threads 1 \
+ --io-total $((4096 * count)) --io-pattern seq
+ rbd journal status --image ${image} | fgrep "tid=$((count - 1))"
+ restore_commit_position ${journal}
+ rbd journal status --image ${image} | fgrep "positions=[]"
+ local count1=$(rbd journal inspect --verbose ${journal} |
+ grep -c 'event_type.*AioWrite')
+ test "${count}" -eq "${count1}"
+
+ rbd journal export ${journal} $TMPDIR/journal.export
+ local size=$(stat -c "%s" $TMPDIR/journal.export)
+ test "${size}" -gt 0
+
+ rbd export ${image} $TMPDIR/${image}.export
+
+ local image1=${image}1
+ rbd create --image-feature exclusive-lock --image-feature journaling \
+ --size 128 ${image1}
+ journal1=$(rbd info ${image1} --format=xml 2>/dev/null |
+ $XMLSTARLET sel -t -v "//image/journal")
+
+ save_commit_position ${journal1}
+ rbd journal import --dest ${image1} $TMPDIR/journal.export
+ rbd snap create ${image1}@test
+ restore_commit_position ${journal1}
+ # check that commit position is properly updated: the journal should contain
+ # 14 entries (2 AioFlush + 10 AioWrite + 1 SnapCreate + 1 OpFinish) and
+ # commit position set to tid=14
+ rbd journal inspect --image ${image1} --verbose | awk '
+ /AioFlush/ {a++} # match: "event_type": "AioFlush",
+ /AioWrite/ {w++} # match: "event_type": "AioWrite",
+ /SnapCreate/ {s++} # match: "event_type": "SnapCreate",
+ /OpFinish/ {f++} # match: "event_type": "OpFinish",
+ /entries inspected/ {t=$1; e=$4} # match: 14 entries inspected, 0 errors
+ {print} # for diagnostic
+ END {
+ if (a != 2 || w != 10 || s != 1 || f != 1 || t != 14 || e != 0) exit(1)
+ }
+ '
+
+ rbd export ${image1}@test $TMPDIR/${image1}.export
+ cmp $TMPDIR/${image}.export $TMPDIR/${image1}.export
+
+ rbd journal reset ${journal}
+
+ rbd journal inspect --verbose ${journal} | expect_false grep 'event_type'
+
+ rbd snap purge ${image1}
+ rbd remove ${image1}
+ rbd remove ${image}
+}
+
+
+rbd_assert_eq() {
+ local image=$1
+ local cmd=$2
+ local param=$3
+ local expected_val=$4
+
+ local val=$(rbd --format xml ${cmd} --image ${image} |
+ $XMLSTARLET sel -t -v "${param}")
+ test "${val}" = "${expected_val}"
+}
+
+test_rbd_create()
+{
+ local image=testrbdcreate$$
+
+ rbd create --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ --size 256 ${image}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+test_rbd_copy()
+{
+ local src=testrbdcopys$$
+ rbd create --size 256 ${src}
+
+ local image=testrbdcopy$$
+ rbd copy --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ ${src} ${image}
+
+ rbd remove ${src}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+test_rbd_deep_copy()
+{
+ local src=testrbdcopys$$
+ rbd create --size 256 ${src}
+ rbd snap create ${src}@snap1
+
+ local dest=testrbdcopy$$
+ rbd deep copy --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ ${src} ${dest}
+
+ rbd snap purge ${src}
+ rbd remove ${src}
+
+ rbd_assert_eq ${dest} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${dest} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${dest} 'journal info' '//journal/object_pool' rbd
+
+ rbd snap purge ${dest}
+ rbd remove ${dest}
+}
+
+test_rbd_clone()
+{
+ local parent=testrbdclonep$$
+ rbd create --image-feature layering --size 256 ${parent}
+ rbd snap create ${parent}@snap
+ rbd snap protect ${parent}@snap
+
+ local image=testrbdclone$$
+ rbd clone --image-feature layering --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ ${parent}@snap ${image}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+ rbd snap unprotect ${parent}@snap
+ rbd snap purge ${parent}
+ rbd remove ${parent}
+}
+
+test_rbd_import()
+{
+ local src=testrbdimports$$
+ rbd create --size 256 ${src}
+
+ rbd export ${src} $TMPDIR/${src}.export
+ rbd remove ${src}
+
+ local image=testrbdimport$$
+ rbd import --image-feature exclusive-lock --image-feature journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6 \
+ $TMPDIR/${src}.export ${image}
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+test_rbd_feature()
+{
+ local image=testrbdfeature$$
+
+ rbd create --image-feature exclusive-lock --size 256 ${image}
+
+ rbd feature enable ${image} journaling \
+ --journal-pool rbd \
+ --journal-object-size 20M \
+ --journal-splay-width 6
+
+ rbd_assert_eq ${image} 'journal info' '//journal/order' 25
+ rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6
+ rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd
+
+ rbd remove ${image}
+}
+
+TESTS+=" rbd_journal"
+TESTS+=" rbd_create"
+TESTS+=" rbd_copy"
+TESTS+=" rbd_clone"
+TESTS+=" rbd_import"
+TESTS+=" rbd_feature"
+
+#
+# "main" follows
+#
+
+tests_to_run=()
+
+cleanup=true
+
+while [[ $# -gt 0 ]]; do
+ opt=$1
+
+ case "$opt" in
+ "-l" )
+ do_list=1
+ ;;
+ "--no-cleanup" )
+ cleanup=false
+ ;;
+ "-t" )
+ shift
+ if [[ -z "$1" ]]; then
+ echo "missing argument to '-t'"
+ usage ;
+ exit 1
+ fi
+ tests_to_run+=" $1"
+ ;;
+ "-h" )
+ usage ;
+ exit 0
+ ;;
+ esac
+ shift
+done
+
+if [[ $do_list -eq 1 ]]; then
+ list_tests ;
+ exit 0
+fi
+
+TMPDIR=/tmp/rbd_journal$$
+mkdir $TMPDIR
+if $cleanup; then
+ trap "rm -fr $TMPDIR" 0
+fi
+
+if test -z "$tests_to_run" ; then
+ tests_to_run="$TESTS"
+fi
+
+for i in $tests_to_run; do
+ set -x
+ test_${i}
+ set +x
+done
+
+echo OK
diff --git a/qa/workunits/rbd/kernel.sh b/qa/workunits/rbd/kernel.sh
new file mode 100755
index 000000000..faa5760ee
--- /dev/null
+++ b/qa/workunits/rbd/kernel.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+set -ex
+
+CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-}
+CEPH_ID=${CEPH_ID:-admin}
+SECRET_ARGS=''
+if [ -n "$CEPH_SECRET_FILE" ]; then
+ SECRET_ARGS="--secret $CEPH_SECRET_FILE"
+fi
+
+TMP_FILES="/tmp/img1 /tmp/img1.small /tmp/img1.snap1 /tmp/img1.export /tmp/img1.trunc"
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function get_device_dir {
+ local POOL=$1
+ local IMAGE=$2
+ local SNAP=$3
+ rbd device list | tail -n +2 | egrep "\s+$POOL\s+$IMAGE\s+$SNAP\s+" |
+ awk '{print $1;}'
+}
+
+function clean_up {
+ [ -e /dev/rbd/rbd/testimg1@snap1 ] &&
+ sudo rbd device unmap /dev/rbd/rbd/testimg1@snap1
+ if [ -e /dev/rbd/rbd/testimg1 ]; then
+ sudo rbd device unmap /dev/rbd/rbd/testimg1
+ rbd snap purge testimg1 || true
+ fi
+ rbd ls | grep testimg1 > /dev/null && rbd rm testimg1 || true
+ sudo rm -f $TMP_FILES
+}
+
+clean_up
+
+trap clean_up INT TERM EXIT
+
+# create an image
+dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10
+dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100
+dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000
+dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000
+dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000
+dd if=/dev/zero of=/tmp/img1 count=0 seek=150000
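+# the zero-length dd above only seeks, truncating the file to 150000
+# 512-byte sectors (76800000 bytes), which the size checks below expect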
+
+# import
+rbd import /tmp/img1 testimg1
+sudo rbd device map testimg1 --user $CEPH_ID $SECRET_ARGS
+
+DEV_ID1=$(get_device_dir rbd testimg1 -)
+echo "dev_id1 = $DEV_ID1"
+cat /sys/bus/rbd/devices/$DEV_ID1/size
+cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
+
+sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
+cmp /tmp/img1 /tmp/img1.export
+
+# snapshot
+rbd snap create testimg1 --snap=snap1
+sudo rbd device map --snap=snap1 testimg1 --user $CEPH_ID $SECRET_ARGS
+
+DEV_ID2=$(get_device_dir rbd testimg1 snap1)
+cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
+
+sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
+cmp /tmp/img1 /tmp/img1.snap1
+
+# resize
+rbd resize testimg1 --size=40 --allow-shrink
+cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 41943040
+cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
+
+sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.small
+cp /tmp/img1 /tmp/img1.trunc
+truncate -s 41943040 /tmp/img1.trunc
+cmp /tmp/img1.trunc /tmp/img1.small
+
+# rollback expects an unlocked image
+# (acquire and) release the lock as a side effect
+rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg1
+
+# rollback and check data again
+rbd snap rollback --snap=snap1 testimg1
+cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000
+cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000
+sudo rm -f /tmp/img1.snap1 /tmp/img1.export
+
+sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1
+cmp /tmp/img1 /tmp/img1.snap1
+sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export
+cmp /tmp/img1 /tmp/img1.export
+
+# zeros are returned if an image or a snapshot is removed
+expect_false cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero
+rbd snap rm --snap=snap1 testimg1
+cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_data_pool.sh b/qa/workunits/rbd/krbd_data_pool.sh
new file mode 100755
index 000000000..8eada88bb
--- /dev/null
+++ b/qa/workunits/rbd/krbd_data_pool.sh
@@ -0,0 +1,206 @@
+#!/usr/bin/env bash
+
+set -ex
+
+export RBD_FORCE_ALLOW_V1=1
+
+function fill_image() {
+ local spec=$1
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 -W 0 $IMAGE_SIZE" $dev
+ sudo rbd unmap $dev
+}
+
+function create_clones() {
+ local spec=$1
+
+ rbd snap create $spec@snap
+ rbd snap protect $spec@snap
+
+ local pool=${spec%/*} # pool/image is assumed
+ local image=${spec#*/}
+ local child_pool
+ for child_pool in $pool clonesonly; do
+ rbd clone $spec@snap $child_pool/$pool-$image-clone1
+ rbd clone $spec@snap --data-pool repdata $child_pool/$pool-$image-clone2
+ rbd clone $spec@snap --data-pool ecdata $child_pool/$pool-$image-clone3
+ done
+}
+
+function trigger_copyup() {
+ local spec=$1
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ local i
+ {
+ for ((i = 0; i < $NUM_OBJECTS; i++)); do
+ echo pwrite -b $OBJECT_SIZE -S 0x59 $((i * OBJECT_SIZE + OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))
+ done
+ echo fsync
+ echo quit
+ } | xfs_io $dev
+ sudo rbd unmap $dev
+}
+
+function compare() {
+ local spec=$1
+ local object=$2
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ local i
+ for ((i = 0; i < $NUM_OBJECTS; i++)); do
+ dd if=$dev bs=$OBJECT_SIZE count=1 skip=$i | cmp $object -
+ done
+ sudo rbd unmap $dev
+}
+
+function mkfs_and_mount() {
+ local spec=$1
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ blkdiscard $dev
+ mkfs.ext4 -q -E nodiscard $dev
+ sudo mount $dev /mnt
+ sudo umount /mnt
+ sudo rbd unmap $dev
+}
+
+function list_HEADs() {
+ local pool=$1
+
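+ # "rados ls" also lists objects that exist only as snapshot clones;
+ # "rados stat" succeeds only for objects with a HEAD version, so use
+ # it to filter the listing down to HEADs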
+ rados -p $pool ls | while read obj; do
+ if rados -p $pool stat $obj >/dev/null 2>&1; then
+ echo $obj
+ fi
+ done
+}
+
+function count_data_objects() {
+ local spec=$1
+
+ local pool
+ pool=$(rbd info $spec | grep 'data_pool: ' | awk '{ print $NF }')
+ if [[ -z $pool ]]; then
+ pool=${spec%/*} # pool/image is assumed
+ fi
+
+ local prefix
+ prefix=$(rbd info $spec | grep 'block_name_prefix: ' | awk '{ print $NF }')
+ rados -p $pool ls | grep -c $prefix
+}
+
+function get_num_clones() {
+ local pool=$1
+
+ rados -p $pool --format=json df |
+ python3 -c 'import sys, json; print(json.load(sys.stdin)["pools"][0]["num_object_clones"])'
+}
+
+ceph osd pool create repdata 24 24
+rbd pool init repdata
+ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2
+ceph osd pool create ecdata 24 24 erasure teuthologyprofile
+rbd pool init ecdata
+ceph osd pool set ecdata allow_ec_overwrites true
+ceph osd pool create rbdnonzero 24 24
+rbd pool init rbdnonzero
+ceph osd pool create clonesonly 24 24
+rbd pool init clonesonly
+
+for pool in rbd rbdnonzero; do
+ rbd create --size 200 --image-format 1 $pool/img0
+ rbd create --size 200 $pool/img1
+ rbd create --size 200 --data-pool repdata $pool/img2
+ rbd create --size 200 --data-pool ecdata $pool/img3
+done
+
+IMAGE_SIZE=$(rbd info --format=json img1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
+OBJECT_SIZE=$(rbd info --format=json img1 | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
+NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
+[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
+
+OBJECT_X=$(mktemp) # xxxx
+xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $OBJECT_SIZE" $OBJECT_X
+
+OBJECT_XY=$(mktemp) # xxYY
+xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $((OBJECT_SIZE / 2))" \
+ -c "pwrite -b $OBJECT_SIZE -S 0x59 $((OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))" \
+ $OBJECT_XY
+
+for pool in rbd rbdnonzero; do
+ for i in {0..3}; do
+ fill_image $pool/img$i
+ if [[ $i -ne 0 ]]; then
+ create_clones $pool/img$i
+ for child_pool in $pool clonesonly; do
+ for j in {1..3}; do
+ trigger_copyup $child_pool/$pool-img$i-clone$j
+ done
+ done
+ fi
+ done
+done
+
+# rbd_directory, rbd_children, rbd_info + img0 header + ...
+NUM_META_RBDS=$((3 + 1 + 3 * (1*2 + 3*2)))
+# rbd_directory, rbd_children, rbd_info + ...
+NUM_META_CLONESONLY=$((3 + 2 * 3 * (3*2)))
+
+[[ $(rados -p rbd ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(rados -p repdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(rados -p ecdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(rados -p rbdnonzero ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(rados -p clonesonly ls | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
+
+for pool in rbd rbdnonzero; do
+ for i in {0..3}; do
+ [[ $(count_data_objects $pool/img$i) -eq $NUM_OBJECTS ]]
+ if [[ $i -ne 0 ]]; then
+ for child_pool in $pool clonesonly; do
+ for j in {1..3}; do
+ [[ $(count_data_objects $child_pool/$pool-img$i-clone$j) -eq $NUM_OBJECTS ]]
+ done
+ done
+ fi
+ done
+done
+
+[[ $(get_num_clones rbd) -eq 0 ]]
+[[ $(get_num_clones repdata) -eq 0 ]]
+[[ $(get_num_clones ecdata) -eq 0 ]]
+[[ $(get_num_clones rbdnonzero) -eq 0 ]]
+[[ $(get_num_clones clonesonly) -eq 0 ]]
+
+for pool in rbd rbdnonzero; do
+ for i in {0..3}; do
+ compare $pool/img$i $OBJECT_X
+ mkfs_and_mount $pool/img$i
+ if [[ $i -ne 0 ]]; then
+ for child_pool in $pool clonesonly; do
+ for j in {1..3}; do
+ compare $child_pool/$pool-img$i-clone$j $OBJECT_XY
+ done
+ done
+ fi
+ done
+done
+
+# mkfs_and_mount should discard some objects everywhere but in clonesonly
+[[ $(list_HEADs rbd | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs repdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs ecdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs rbdnonzero | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]]
+[[ $(list_HEADs clonesonly | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]]
+
+[[ $(get_num_clones rbd) -eq $NUM_OBJECTS ]]
+[[ $(get_num_clones repdata) -eq $((2 * NUM_OBJECTS)) ]]
+[[ $(get_num_clones ecdata) -eq $((2 * NUM_OBJECTS)) ]]
+[[ $(get_num_clones rbdnonzero) -eq $NUM_OBJECTS ]]
+[[ $(get_num_clones clonesonly) -eq 0 ]]
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_exclusive_option.sh b/qa/workunits/rbd/krbd_exclusive_option.sh
new file mode 100755
index 000000000..f8493ce98
--- /dev/null
+++ b/qa/workunits/rbd/krbd_exclusive_option.sh
@@ -0,0 +1,233 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function assert_locked() {
+ local dev_id="${1#/dev/rbd}"
+
+ local client_addr
+ client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
+
+ local client_id
+ client_id="$(< $SYSFS_DIR/$dev_id/client_id)"
+ # client4324 -> client.4324
+ client_id="client.${client_id#client}"
+
+ local watch_cookie
+ watch_cookie="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID |
+ grep $client_id | cut -d ' ' -f 3 | cut -d '=' -f 2)"
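+ # expect exactly one watch on the header from this client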
+ [[ $(echo -n "$watch_cookie" | grep -c '^') -eq 1 ]]
+
+ local actual
+ actual="$(rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
+ python3 -m json.tool --sort-keys)"
+
+ local expected
+ expected="$(cat <<EOF | python3 -m json.tool --sort-keys
+{
+ "lockers": [
+ {
+ "addr": "$client_addr",
+ "cookie": "auto $watch_cookie",
+ "description": "",
+ "expiration": "0.000000",
+ "name": "$client_id"
+ }
+ ],
+ "name": "rbd_lock",
+ "tag": "internal",
+ "type": "exclusive"
+}
+EOF
+ )"
+
+ [ "$actual" = "$expected" ]
+}
+
+function assert_unlocked() {
+ rados -p rbd --format=json lock info rbd_header.$IMAGE_ID rbd_lock |
+ grep '"lockers":\[\]'
+}
+
+function blocklist_add() {
+ local dev_id="${1#/dev/rbd}"
+
+ local client_addr
+ client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)"
+
+ ceph osd blocklist add $client_addr
+}
+
+SYSFS_DIR="/sys/bus/rbd/devices"
+IMAGE_NAME="exclusive-option-test"
+
+rbd create --size 1 --image-feature '' $IMAGE_NAME
+
+IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
+ python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'].split('.')[1])")"
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_unlocked
+sudo rbd unmap $DEV
+assert_unlocked
+
+expect_false sudo rbd map -o exclusive $IMAGE_NAME
+assert_unlocked
+
+expect_false sudo rbd map -o lock_on_read $IMAGE_NAME
+assert_unlocked
+
+rbd feature enable $IMAGE_NAME exclusive-lock
+rbd snap create $IMAGE_NAME@snap
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+[[ $(blockdev --getro $DEV) -eq 0 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME@snap)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o ro $IMAGE_NAME)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+[[ $(blockdev --getro $DEV) -eq 0 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME@snap)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive,ro $IMAGE_NAME)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+# alternate syntax
+DEV=$(sudo rbd map --exclusive --read-only $IMAGE_NAME)
+assert_unlocked
+[[ $(blockdev --getro $DEV) -eq 1 ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
+assert_locked $OTHER_DEV
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $DEV
+dd if=/dev/urandom of=$OTHER_DEV bs=4k count=10 oflag=direct
+assert_locked $OTHER_DEV
+sudo rbd unmap $DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
+assert_locked $OTHER_DEV
+dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+expect_false dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $OTHER_DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+assert_unlocked
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o lock_on_read $IMAGE_NAME)
+assert_locked $DEV
+OTHER_DEV=$(sudo rbd map -o noshare,exclusive $IMAGE_NAME)
+assert_locked $OTHER_DEV
+expect_false dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+expect_false dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+sudo udevadm settle
+assert_locked $OTHER_DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+dd if=$DEV of=/dev/null bs=4k count=10 iflag=direct
+assert_locked $DEV
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+expect_false sudo rbd map -o noshare $IMAGE_NAME
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+expect_false sudo rbd map -o noshare,exclusive $IMAGE_NAME
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+rbd resize --size 1G $IMAGE_NAME
+assert_unlocked
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map -o exclusive $IMAGE_NAME)
+assert_locked $DEV
+expect_false rbd resize --size 2G $IMAGE_NAME
+assert_locked $DEV
+sudo rbd unmap $DEV
+assert_unlocked
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+assert_locked $DEV
+dd if=/dev/urandom of=$DEV bs=4k count=10 oflag=direct
+{ sleep 10; blocklist_add $DEV; } &
+PID=$!
+expect_false dd if=/dev/urandom of=$DEV bs=4k count=200000 oflag=direct
+wait $PID
+# break lock
+OTHER_DEV=$(sudo rbd map -o noshare $IMAGE_NAME)
+assert_locked $OTHER_DEV
+sudo rbd unmap $DEV
+assert_locked $OTHER_DEV
+sudo rbd unmap $OTHER_DEV
+assert_unlocked
+
+# induce a watch error after 30 seconds
+DEV=$(sudo rbd map -o exclusive,osdkeepalive=60 $IMAGE_NAME)
+assert_locked $DEV
+OLD_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
+sleep 40
+assert_locked $DEV
+NEW_WATCHER="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID)"
+# same client_id, old cookie < new cookie
+[ "$(echo "$OLD_WATCHER" | cut -d ' ' -f 2)" = \
+ "$(echo "$NEW_WATCHER" | cut -d ' ' -f 2)" ]
+[[ $(echo "$OLD_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) -lt \
+ $(echo "$NEW_WATCHER" | cut -d ' ' -f 3 | cut -d '=' -f 2) ]]
+sudo rbd unmap $DEV
+assert_unlocked
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_fallocate.sh b/qa/workunits/rbd/krbd_fallocate.sh
new file mode 100755
index 000000000..79efa1a8b
--- /dev/null
+++ b/qa/workunits/rbd/krbd_fallocate.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+# - fallocate -z deallocates because BLKDEV_ZERO_NOUNMAP hint is ignored by
+# krbd
+#
+# - big unaligned blkdiscard and fallocate -z/-p leave the objects in place
+
+set -ex
+
+# no blkdiscard(8) in trusty
+function py_blkdiscard() {
+ local offset=$1
+
+ python3 <<EOF
+import fcntl, struct
+BLKDISCARD = 0x1277
+with open('$DEV', 'w') as dev:
+ fcntl.ioctl(dev, BLKDISCARD, struct.pack('QQ', $offset, $IMAGE_SIZE - $offset))
+EOF
+}
+
+# fallocate(1) in trusty doesn't support -z/-p
+function py_fallocate() {
+ local mode=$1
+ local offset=$2
+
+ python3 <<EOF
+import os, ctypes, ctypes.util
+FALLOC_FL_KEEP_SIZE = 0x01
+FALLOC_FL_PUNCH_HOLE = 0x02
+FALLOC_FL_ZERO_RANGE = 0x10
+libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+with open('$DEV', 'w') as dev:
+ if libc.fallocate(dev.fileno(), ctypes.c_int($mode), ctypes.c_long($offset), ctypes.c_long($IMAGE_SIZE - $offset)):
+ err = ctypes.get_errno()
+ raise OSError(err, os.strerror(err))
+EOF
+}
+
+function allocate() {
+ xfs_io -c "pwrite -b $OBJECT_SIZE -W 0 $IMAGE_SIZE" $DEV
+ assert_allocated
+}
+
+function assert_allocated() {
+ cmp <(od -xAx $DEV) - <<EOF
+000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
+*
+$(printf %x $IMAGE_SIZE)
+EOF
+ [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $NUM_OBJECTS ]]
+}
+
+function assert_zeroes() {
+ local num_objects_expected=$1
+
+ cmp <(od -xAx $DEV) - <<EOF
+000000 0000 0000 0000 0000 0000 0000 0000 0000
+*
+$(printf %x $IMAGE_SIZE)
+EOF
+ [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
+}
+
+function assert_zeroes_unaligned() {
+ local num_objects_expected=$1
+
+ cmp <(od -xAx $DEV) - <<EOF
+000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd
+*
+$(printf %x $((OBJECT_SIZE / 2))) 0000 0000 0000 0000 0000 0000 0000 0000
+*
+$(printf %x $IMAGE_SIZE)
+EOF
+ [[ $(rados -p rbd ls | grep -c rbd_data.$IMAGE_ID) -eq $num_objects_expected ]]
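+ # the objects survive the unaligned ops: each either keeps just its
+ # written first half or is truncated to size 0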
+ for ((i = 0; i < $num_objects_expected; i++)); do
+ rados -p rbd stat rbd_data.$IMAGE_ID.$(printf %016x $i) | egrep "(size $((OBJECT_SIZE / 2)))|(size 0)"
+ done
+}
+
+IMAGE_NAME="fallocate-test"
+
+rbd create --size 200 $IMAGE_NAME
+
+IMAGE_SIZE=$(rbd info --format=json $IMAGE_NAME | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
+OBJECT_SIZE=$(rbd info --format=json $IMAGE_NAME | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
+NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
+[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
+
+IMAGE_ID="$(rbd info --format=json $IMAGE_NAME |
+ python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'].split('.')[1])")"
+
+DEV=$(sudo rbd map $IMAGE_NAME)
+
+# make sure -ENOENT is hidden
+assert_zeroes 0
+py_blkdiscard 0
+assert_zeroes 0
+
+# blkdev_issue_discard
+allocate
+py_blkdiscard 0
+assert_zeroes 0
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
+allocate
+py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
+assert_zeroes 0
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
+allocate
+py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0
+assert_zeroes 0
+
+# unaligned blkdev_issue_discard
+allocate
+py_blkdiscard $((OBJECT_SIZE / 2))
+assert_zeroes_unaligned $NUM_OBJECTS
+
+# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
+allocate
+py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
+assert_zeroes_unaligned $NUM_OBJECTS
+
+# unaligned blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
+allocate
+py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE $((OBJECT_SIZE / 2))
+assert_zeroes_unaligned $NUM_OBJECTS
+
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o notrim $IMAGE_NAME)
+
+# blkdev_issue_discard
+allocate
+py_blkdiscard 0 |& grep 'Operation not supported'
+assert_allocated
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOUNMAP
+allocate
+py_fallocate FALLOC_FL_ZERO_RANGE\|FALLOC_FL_KEEP_SIZE 0
+assert_zeroes $NUM_OBJECTS
+
+# blkdev_issue_zeroout w/ BLKDEV_ZERO_NOFALLBACK
+allocate
+py_fallocate FALLOC_FL_PUNCH_HOLE\|FALLOC_FL_KEEP_SIZE 0 |& grep 'Operation not supported'
+assert_allocated
+
+sudo rbd unmap $DEV
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_huge_osdmap.sh b/qa/workunits/rbd/krbd_huge_osdmap.sh
new file mode 100755
index 000000000..0a550d674
--- /dev/null
+++ b/qa/workunits/rbd/krbd_huge_osdmap.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/40481.
+#
+# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
+# is ~13M. However, the in-memory osdmap is larger than the remaining ~3M:
+# the in-memory osd_addr array for 60000 OSDs alone is ~8M because of
+# sockaddr_storage.
+#
+# Set mon_max_osd = 60000 in ceph.conf.
+
+set -ex
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function run_test() {
+ local dev
+
+ # initially tiny, grow via incrementals
+ dev=$(sudo rbd map img)
+ for max in 8 60 600 6000 60000; do
+ ceph osd setmaxosd $max
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ done
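+ # re-injecting the unchanged crushmap bumps the osdmap epoch once more,
+ # generating one more incremental to apply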
+ ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ sudo rbd unmap $dev
+
+ # initially huge, shrink via incrementals
+ dev=$(sudo rbd map img)
+ for max in 60000 6000 600 60 8; do
+ ceph osd setmaxosd $max
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ done
+ ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
+ expect_false sudo rbd map wait_for/latest_osdmap
+ xfs_io -c 'pwrite -w 0 12M' $dev
+ sudo rbd unmap $dev
+}
+
+rbd create --size 12M img
+run_test
+# repeat with primary affinity set (adds the osd_primary_affinity array)
+ceph osd primary-affinity osd.0 0.5
+run_test
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh b/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh
new file mode 100755
index 000000000..f70f38639
--- /dev/null
+++ b/qa/workunits/rbd/krbd_latest_osdmap_on_map.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -ex
+
+function run_test() {
+ ceph osd pool create foo 12
+ rbd pool init foo
+ rbd create --size 1 foo/img
+
+ local dev
+ dev=$(sudo rbd map foo/img)
+ sudo rbd unmap $dev
+
+ ceph osd pool delete foo foo --yes-i-really-really-mean-it
+}
+
+NUM_ITER=20
+
+for ((i = 0; i < $NUM_ITER; i++)); do
+ run_test
+done
+
+rbd create --size 1 img
+DEV=$(sudo rbd map img)
+for ((i = 0; i < $NUM_ITER; i++)); do
+ run_test
+done
+sudo rbd unmap $DEV
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_namespaces.sh b/qa/workunits/rbd/krbd_namespaces.sh
new file mode 100755
index 000000000..0273d8499
--- /dev/null
+++ b/qa/workunits/rbd/krbd_namespaces.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function get_block_name_prefix() {
+ rbd info --format=json $1 | python3 -c "import sys, json; print(json.load(sys.stdin)['block_name_prefix'])"
+}
+
+function do_pwrite() {
+ local spec=$1
+ local old_byte=$2
+ local new_byte=$3
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ cmp <(dd if=/dev/zero bs=1M count=10 | tr \\000 \\$old_byte) $dev
+ xfs_io -c "pwrite -b 1M -S $new_byte 0 10M" $dev
+ sudo rbd unmap $dev
+}
+
+function do_cmp() {
+ local spec=$1
+ local byte=$2
+
+ local dev
+ dev=$(sudo rbd map $spec)
+ cmp <(dd if=/dev/zero bs=1M count=10 | tr \\000 \\$byte) $dev
+ sudo rbd unmap $dev
+}
+
+function gen_child_specs() {
+ local i=$1
+
+ local child_specs="foo/img$i-clone1 foo/img$i-clone2 foo/ns1/img$i-clone1 foo/ns1/img$i-clone2"
+ if [[ $i -ge 3 ]]; then
+ child_specs="$child_specs foo/ns2/img$i-clone1 foo/ns2/img$i-clone2"
+ fi
+ echo $child_specs
+}
+
+ceph osd pool create foo 12
+rbd pool init foo
+ceph osd pool create bar 12
+rbd pool init bar
+
+ceph osd set-require-min-compat-client nautilus
+rbd namespace create foo/ns1
+rbd namespace create foo/ns2
+
+SPECS=(foo/img1 foo/img2 foo/ns1/img3 foo/ns1/img4)
+
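+# every image and clone gets a distinct fill byte (001, 002, ...) so that
+# any data-pool or namespace mixup shows up as a cmp failure at the end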
+COUNT=1
+for spec in "${SPECS[@]}"; do
+ if [[ $spec =~ img1|img3 ]]; then
+ rbd create --size 10 $spec
+ else
+ rbd create --size 10 --data-pool bar $spec
+ fi
+ do_pwrite $spec 000 $(printf %03d $COUNT)
+ rbd snap create $spec@snap
+ COUNT=$((COUNT + 1))
+done
+for i in {1..4}; do
+ for child_spec in $(gen_child_specs $i); do
+ if [[ $child_spec =~ clone1 ]]; then
+ rbd clone ${SPECS[i - 1]}@snap $child_spec
+ else
+ rbd clone --data-pool bar ${SPECS[i - 1]}@snap $child_spec
+ fi
+ do_pwrite $child_spec $(printf %03d $i) $(printf %03d $COUNT)
+ COUNT=$((COUNT + 1))
+ done
+done
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img1-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img1-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img1-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img1-clone2)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img2-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img2-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img2-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img2-clone2)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img3-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img3-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img3-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img3-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img3-clone2)) -eq 3 ]]
+
+[[ $(rados -p foo ls | grep -c $(get_block_name_prefix foo/img4-clone1)) -eq 3 ]]
+[[ $(rados -p bar ls | grep -c $(get_block_name_prefix foo/img4-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns1 ls | grep -c $(get_block_name_prefix foo/ns1/img4-clone2)) -eq 3 ]]
+[[ $(rados -p foo -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img4-clone1)) -eq 3 ]]
+[[ $(rados -p bar -N ns2 ls | grep -c $(get_block_name_prefix foo/ns2/img4-clone2)) -eq 3 ]]
+
+COUNT=1
+for spec in "${SPECS[@]}"; do
+ do_cmp $spec $(printf %03d $COUNT)
+ COUNT=$((COUNT + 1))
+done
+for i in {1..4}; do
+ for child_spec in $(gen_child_specs $i); do
+ do_cmp $child_spec $(printf %03d $COUNT)
+ COUNT=$((COUNT + 1))
+ done
+done
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_rxbounce.sh b/qa/workunits/rbd/krbd_rxbounce.sh
new file mode 100755
index 000000000..ad00e3f96
--- /dev/null
+++ b/qa/workunits/rbd/krbd_rxbounce.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+set -ex
+
+rbd create --size 256 img
+
+IMAGE_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["size"])')
+OBJECT_SIZE=$(rbd info --format=json img | python3 -c 'import sys, json; print(json.load(sys.stdin)["object_size"])')
+NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE))
+[[ $((IMAGE_SIZE % OBJECT_SIZE)) -eq 0 ]]
+OP_SIZE=16384
+
+DEV=$(sudo rbd map img)
+{
+ for ((i = 0; i < $NUM_OBJECTS; i++)); do
+ echo pwrite -b $OP_SIZE -S $i $((i * OBJECT_SIZE)) $OP_SIZE
+ done
+ echo fsync
+ echo quit
+} | xfs_io $DEV
+sudo rbd unmap $DEV
+
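+# build a racing reader: all threads pread into one shared buffer, so the
+# buffer contents change underneath the messenger while checksums are
+# being verified; this is the race rxbounce is meant to absorb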
+g++ -xc++ -o racereads - -lpthread <<EOF
+#include <assert.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <thread>
+#include <vector>
+
+const int object_size = $OBJECT_SIZE;
+const int num_objects = $NUM_OBJECTS;
+const int read_len = $OP_SIZE;
+const int num_reads = 1024;
+
+int main() {
+ int fd = open("$DEV", O_DIRECT | O_RDONLY);
+ assert(fd >= 0);
+
+ void *buf;
+ int r = posix_memalign(&buf, 512, read_len);
+ assert(r == 0);
+
+ std::vector<std::thread> threads;
+ for (int i = 0; i < num_objects; i++) {
+ threads.emplace_back(
+ [fd, buf, read_off = static_cast<off_t>(i) * object_size]() {
+ for (int i = 0; i < num_reads; i++) {
+ auto len = pread(fd, buf, read_len, read_off);
+ assert(len == read_len);
+ }
+ });
+ }
+
+ for (auto &t : threads) {
+ t.join();
+ }
+}
+EOF
+
+DEV=$(sudo rbd map -o ms_mode=legacy img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* bad crc/signature') -gt 100 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=legacy,rxbounce img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* bad crc/signature') -eq 0 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=crc img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -gt 100 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=crc,rxbounce img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
+sudo rbd unmap $DEV
+
+# rxbounce is a no-op for secure mode
+DEV=$(sudo rbd map -o ms_mode=secure img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
+sudo rbd unmap $DEV
+
+DEV=$(sudo rbd map -o ms_mode=secure,rxbounce img)
+sudo dmesg -C
+./racereads
+[[ $(dmesg | grep -c 'libceph: osd.* integrity error') -eq 0 ]]
+sudo rbd unmap $DEV
+
+rbd rm img
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_stable_writes.sh b/qa/workunits/rbd/krbd_stable_writes.sh
new file mode 100755
index 000000000..d00e5fd04
--- /dev/null
+++ b/qa/workunits/rbd/krbd_stable_writes.sh
@@ -0,0 +1,141 @@
+#!/usr/bin/env bash
+
+set -ex
+
+function assert_dm() {
+ local name=$1
+ local val=$2
+
+ local devno
+ devno=$(sudo dmsetup info -c --noheadings -o Major,Minor $name)
+ grep -q $val /sys/dev/block/$devno/queue/stable_writes
+}
+
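+# swap a new table into a live dm device; the replacement table is read
+# from stdin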
+function dmsetup_reload() {
+ local name=$1
+
+ local table
+ table=$(</dev/stdin)
+
+ sudo dmsetup suspend $name
+ echo "$table" | sudo dmsetup reload $name
+ sudo dmsetup resume $name
+}
+
+IMAGE_NAME="stable-writes-test"
+
+rbd create --size 1 $IMAGE_NAME
+DEV=$(sudo rbd map $IMAGE_NAME)
+
+fallocate -l 1M loopfile
+LOOP_DEV=$(sudo losetup -f --show loopfile)
+
+[[ $(blockdev --getsize64 $DEV) -eq 1048576 ]]
+grep -q 1 /sys/block/${DEV#/dev/}/queue/stable_writes
+
+rbd resize --size 2 $IMAGE_NAME
+[[ $(blockdev --getsize64 $DEV) -eq 2097152 ]]
+grep -q 1 /sys/block/${DEV#/dev/}/queue/stable_writes
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+1024 2048 error
+EOF
+assert_dm tbl 0
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+1024 2048 error
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+1024 2048 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $LOOP_DEV 0
+1024 2048 linear $DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+cat <<EOF | sudo dmsetup create tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 error
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 0
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+1024 2048 linear $LOOP_DEV 0
+EOF
+assert_dm tbl 1
+cat <<EOF | dmsetup_reload tbl
+0 1024 linear $DEV 0
+EOF
+assert_dm tbl 1
+sudo dmsetup remove tbl
+
+sudo losetup -d $LOOP_DEV
+rm loopfile
+
+sudo rbd unmap $DEV
+rbd rm $IMAGE_NAME
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_enumerate.sh b/qa/workunits/rbd/krbd_udev_enumerate.sh
new file mode 100755
index 000000000..494f958f8
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_enumerate.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/41036, but it also
+# triggers https://tracker.ceph.com/issues/41404 in some environments.
+
+set -ex
+
+function assert_exit_codes() {
+ declare -a pids=($@)
+
+ for pid in ${pids[@]}; do
+ wait $pid
+ done
+}
+
+function run_map() {
+ declare -a pids
+
+ for i in {1..300}; do
+ sudo rbd map img$i &
+ pids+=($!)
+ done
+
+ assert_exit_codes ${pids[@]}
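+ # header line + 300 mappings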
+ [[ $(rbd showmapped | wc -l) -eq 301 ]]
+}
+
+function run_unmap_by_dev() {
+ declare -a pids
+
+ run_map
+ for i in {0..299}; do
+ sudo rbd unmap /dev/rbd$i &
+ pids+=($!)
+ done
+
+ assert_exit_codes ${pids[@]}
+ [[ $(rbd showmapped | wc -l) -eq 0 ]]
+}
+
+function run_unmap_by_spec() {
+ declare -a pids
+
+ run_map
+ for i in {1..300}; do
+ sudo rbd unmap img$i &
+ pids+=($!)
+ done
+
+ assert_exit_codes ${pids[@]}
+ [[ $(rbd showmapped | wc -l) -eq 0 ]]
+}
+
+# Can't test with exclusive-lock, don't bother enabling deep-flatten.
+# See https://tracker.ceph.com/issues/42492.
+for i in {1..300}; do
+ rbd create --size 1 --image-feature '' img$i
+done
+
+for i in {1..30}; do
+ echo Iteration $i
+ run_unmap_by_dev
+ run_unmap_by_spec
+done
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh b/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh
new file mode 100755
index 000000000..7c9c53a2f
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_netlink_enobufs.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# This is a test for https://tracker.ceph.com/issues/41404, verifying that udev
+# events are properly reaped while the image is being (un)mapped in the kernel.
+# UDEV_BUF_SIZE is 1M (giving us a 2M socket receive buffer), but modprobe +
+# modprobe -r generate ~28M worth of "block" events.
+
+set -ex
+
+rbd create --size 1 img
+
+ceph osd pause
+sudo rbd map img &
+PID=$!
+sudo modprobe scsi_debug max_luns=16 add_host=16 num_parts=1 num_tgts=16
+sudo udevadm settle
+sudo modprobe -r scsi_debug
+[[ $(rbd showmapped | wc -l) -eq 0 ]]
+ceph osd unpause
+wait $PID
+[[ $(rbd showmapped | wc -l) -eq 2 ]]
+sudo rbd unmap img
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_netns.sh b/qa/workunits/rbd/krbd_udev_netns.sh
new file mode 100755
index 000000000..e746a682e
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_netns.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+
+set -ex
+
+sudo ip netns add ns1
+sudo ip link add veth1-ext type veth peer name veth1-int
+sudo ip link set veth1-int netns ns1
+
+sudo ip netns exec ns1 ip link set dev lo up
+sudo ip netns exec ns1 ip addr add 192.168.1.2/24 dev veth1-int
+sudo ip netns exec ns1 ip link set veth1-int up
+sudo ip netns exec ns1 ip route add default via 192.168.1.1
+
+sudo ip addr add 192.168.1.1/24 dev veth1-ext
+sudo ip link set veth1-ext up
+
+# Enable forwarding between the namespace and the default route
+# interface and set up NAT. In case of multiple default routes,
+# just pick the first one.
+if [[ $(sysctl -n net.ipv4.ip_forward) -eq 0 ]]; then
+ sudo iptables -P FORWARD DROP
+ sudo sysctl -w net.ipv4.ip_forward=1
+fi
+IFACE="$(ip route list 0.0.0.0/0 | head -n 1 | cut -d ' ' -f 5)"
+sudo iptables -A FORWARD -i veth1-ext -o "$IFACE" -j ACCEPT
+sudo iptables -A FORWARD -i "$IFACE" -o veth1-ext -j ACCEPT
+sudo iptables -t nat -A POSTROUTING -s 192.168.1.2 -o "$IFACE" -j MASQUERADE
+
+rbd create --size 300 img
+
+DEV="$(sudo rbd map img)"
+mkfs.ext4 "$DEV"
+sudo mount "$DEV" /mnt
+sudo umount /mnt
+sudo rbd unmap "$DEV"
+
+sudo ip netns exec ns1 bash <<'EOF'
+
+set -ex
+
+DEV="/dev/rbd/rbd/img"
+[[ ! -e "$DEV" ]]
+
+# In a network namespace, "rbd map" maps the device and hangs waiting
+# for udev add uevents. udev runs as usual (in particular creating the
+# symlink which is used here because the device node is never printed),
+# but the uevents it sends out never arrive because they don't cross
+# network namespace boundaries.
+set +e
+timeout 30s rbd map img
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+[[ -L "$DEV" ]]
+mkfs.ext4 -F "$DEV"
+mount "$DEV" /mnt
+umount /mnt
+
+# In a network namespace, "rbd unmap" unmaps the device and hangs
+# waiting for udev remove uevents. udev runs as usual (removing the
+# symlink), but the uevents it sends out never arrive because they don't
+# cross network namespace boundaries.
+set +e
+timeout 30s rbd unmap "$DEV"
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+[[ ! -e "$DEV" ]]
+
+# Skip waiting for udev uevents with "-o noudev".
+DEV="$(rbd map -o noudev img)"
+mkfs.ext4 -F "$DEV"
+mount "$DEV" /mnt
+umount /mnt
+rbd unmap -o noudev "$DEV"
+
+EOF
+
+rbd rm img
+
+sudo iptables -t nat -D POSTROUTING -s 192.168.1.2 -o "$IFACE" -j MASQUERADE
+sudo iptables -D FORWARD -i "$IFACE" -o veth1-ext -j ACCEPT
+sudo iptables -D FORWARD -i veth1-ext -o "$IFACE" -j ACCEPT
+sudo ip netns delete ns1
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_udev_symlinks.sh b/qa/workunits/rbd/krbd_udev_symlinks.sh
new file mode 100755
index 000000000..271476527
--- /dev/null
+++ b/qa/workunits/rbd/krbd_udev_symlinks.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+set -ex
+
+SPECS=(
+rbd/img1
+rbd/img2
+rbd/img2@snap1
+rbd/img3
+rbd/img3@snap1
+rbd/img3@snap2
+rbd/ns1/img1
+rbd/ns1/img2
+rbd/ns1/img2@snap1
+rbd/ns1/img3
+rbd/ns1/img3@snap1
+rbd/ns1/img3@snap2
+rbd/ns2/img1
+rbd/ns2/img2
+rbd/ns2/img2@snap1
+rbd/ns2/img3
+rbd/ns2/img3@snap1
+rbd/ns2/img3@snap2
+custom/img1
+custom/img1@snap1
+custom/img2
+custom/img2@snap1
+custom/img2@snap2
+custom/img3
+custom/ns1/img1
+custom/ns1/img1@snap1
+custom/ns1/img2
+custom/ns1/img2@snap1
+custom/ns1/img2@snap2
+custom/ns1/img3
+custom/ns2/img1
+custom/ns2/img1@snap1
+custom/ns2/img2
+custom/ns2/img2@snap1
+custom/ns2/img2@snap2
+custom/ns2/img3
+)
+
+ceph osd pool create custom 8
+rbd pool init custom
+
+ceph osd set-require-min-compat-client nautilus
+rbd namespace create rbd/ns1
+rbd namespace create rbd/ns2
+rbd namespace create custom/ns1
+rbd namespace create custom/ns2
+
+# create in order, images before snapshots
+for spec in "${SPECS[@]}"; do
+ if [[ "$spec" =~ snap ]]; then
+ rbd snap create "$spec"
+ else
+ rbd create --size 10 "$spec"
+ DEV="$(sudo rbd map "$spec")"
+ sudo sfdisk "$DEV" <<EOF
+unit: sectors
+${DEV}p1 : start= 2048, size= 2, type=83
+${DEV}p2 : start= 4096, size= 2, type=83
+EOF
+ sudo rbd unmap "$DEV"
+ fi
+done
+
+[[ ! -e /dev/rbd ]]
+
+# map in random order
+COUNT=${#SPECS[@]}
+read -r -a INDEXES < <(python3 <<EOF
+import random
+l = list(range($COUNT))
+random.shuffle(l)
+print(*l)
+EOF
+)
+
+DEVS=()
+for idx in "${INDEXES[@]}"; do
+ DEVS+=("$(sudo rbd map "${SPECS[idx]}")")
+done
+
+[[ $(rbd showmapped | wc -l) -eq $((COUNT + 1)) ]]
+
+for ((i = 0; i < COUNT; i++)); do
+ [[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}")" == "${DEVS[i]}" ]]
+ [[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}-part1")" == "${DEVS[i]}p1" ]]
+ [[ "$(readlink -e "/dev/rbd/${SPECS[INDEXES[i]]}-part2")" == "${DEVS[i]}p2" ]]
+done
+
+for idx in "${INDEXES[@]}"; do
+ sudo rbd unmap "/dev/rbd/${SPECS[idx]}"
+done
+
+[[ ! -e /dev/rbd ]]
+
+# remove in reverse order, snapshots before images
+for ((i = COUNT - 1; i >= 0; i--)); do
+ if [[ "${SPECS[i]}" =~ snap ]]; then
+ rbd snap rm "${SPECS[i]}"
+ else
+ rbd rm "${SPECS[i]}"
+ fi
+done
+
+rbd namespace rm custom/ns2
+rbd namespace rm custom/ns1
+rbd namespace rm rbd/ns2
+rbd namespace rm rbd/ns1
+
+ceph osd pool delete custom custom --yes-i-really-really-mean-it
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_wac.sh b/qa/workunits/rbd/krbd_wac.sh
new file mode 100755
index 000000000..134460409
--- /dev/null
+++ b/qa/workunits/rbd/krbd_wac.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+set -ex
+
+wget http://download.ceph.com/qa/wac.c
+gcc -o wac wac.c
+
+rbd create --size 300 img
+DEV=$(sudo rbd map img)
+
+sudo mkfs.ext4 $DEV
+sudo mount $DEV /mnt
+set +e
+sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+sudo killall -w wac || true # wac forks
+sudo umount /mnt
+
+sudo wipefs -a $DEV
+sudo vgcreate vg_img $DEV
+sudo lvcreate -L 256M -n lv_img vg_img
+udevadm settle
+sudo mkfs.ext4 /dev/mapper/vg_img-lv_img
+sudo mount /dev/mapper/vg_img-lv_img /mnt
+set +e
+sudo timeout 5m ./wac -l 65536 -n 64 -r /mnt/wac-test
+RET=$?
+set -e
+[[ $RET -eq 124 ]]
+sudo killall -w wac || true # wac forks
+sudo umount /mnt
+sudo vgremove -f vg_img
+sudo pvremove $DEV
+
+sudo rbd unmap $DEV
+rbd rm img
+
+echo OK
diff --git a/qa/workunits/rbd/krbd_watch_errors.sh b/qa/workunits/rbd/krbd_watch_errors.sh
new file mode 100755
index 000000000..f650d2a74
--- /dev/null
+++ b/qa/workunits/rbd/krbd_watch_errors.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+set -ex
+set -o pipefail
+
+function refresh_loop() {
+ local dev_id="$1"
+
+ set +x
+
+ local i
+ for ((i = 1; ; i++)); do
+ echo 1 | sudo tee "${SYSFS_DIR}/${dev_id}/refresh" > /dev/null
+ if ((i % 100 == 0)); then
+ echo "Refreshed ${i} times"
+ fi
+ done
+}
+
+readonly SYSFS_DIR="/sys/bus/rbd/devices"
+readonly IMAGE_NAME="watch-errors-test"
+
+rbd create -s 1G --image-feature exclusive-lock "${IMAGE_NAME}"
+
+# induce a watch error every 30 seconds
+dev="$(sudo rbd device map -o osdkeepalive=60 "${IMAGE_NAME}")"
+dev_id="${dev#/dev/rbd}"
+
+# constantly refresh, not just on watch errors
+refresh_loop "${dev_id}" &
+refresh_pid=$!
+
+sudo dmesg -C
+
+# test that none of the above triggers a deadlock with a workload
+fio --name test --filename="${dev}" --ioengine=libaio --direct=1 \
+ --rw=randwrite --norandommap --randrepeat=0 --bs=512 --iodepth=128 \
+ --time_based --runtime=1h --eta=never
+
+num_errors="$(dmesg | grep -c "rbd${dev_id}: encountered watch error")"
+echo "Recorded ${num_errors} watch errors"
+
+kill "${refresh_pid}"
+wait
+
+sudo rbd device unmap "${dev}"
+
+if ((num_errors < 60)); then
+ echo "Too few watch errors"
+ exit 1
+fi
+
+echo OK
diff --git a/qa/workunits/rbd/luks-encryption.sh b/qa/workunits/rbd/luks-encryption.sh
new file mode 100755
index 000000000..5d3cc68cd
--- /dev/null
+++ b/qa/workunits/rbd/luks-encryption.sh
@@ -0,0 +1,217 @@
+#!/usr/bin/env bash
+set -ex
+
+CEPH_ID=${CEPH_ID:-admin}
+TMP_FILES="/tmp/passphrase /tmp/passphrase2 /tmp/testdata1 /tmp/testdata2 /tmp/cmpdata"
+
+_sudo()
+{
+ local cmd
+
+ if [ `id -u` -eq 0 ]
+ then
+ "$@"
+ return $?
+ fi
+
+ # Look for the command in the user's path. If that fails, run it as is,
+ # assuming it is in sudo's path.
+ cmd=`which $1 2>/dev/null` || cmd=$1
+ shift
+ sudo -nE "${cmd}" "$@"
+}
+
+function drop_caches {
+ sudo sync
+ echo 3 | sudo tee /proc/sys/vm/drop_caches
+}
+
+function expect_false() {
+ if "$@"; then return 1; else return 0; fi
+}
+
+function test_encryption_format() {
+ local format=$1
+ clean_up_cryptsetup
+
+ # format
+ rbd encryption format testimg $format /tmp/passphrase
+ drop_caches
+
+ # open encryption with cryptsetup
+ sudo cryptsetup open $RAW_DEV --type luks cryptsetupdev -d /tmp/passphrase
+ sudo chmod 666 /dev/mapper/cryptsetupdev
+
+ # open encryption with librbd
+ LIBRBD_DEV=$(_sudo rbd -p rbd map testimg -t nbd -o encryption-passphrase-file=/tmp/passphrase)
+ sudo chmod 666 $LIBRBD_DEV
+
+ # write via librbd && compare
+ dd if=/tmp/testdata1 of=$LIBRBD_DEV oflag=direct bs=1M
+ dd if=/dev/mapper/cryptsetupdev of=/tmp/cmpdata iflag=direct bs=4M count=4
+ cmp -n 16MB /tmp/cmpdata /tmp/testdata1
+
+ # write via cryptsetup && compare
+ dd if=/tmp/testdata2 of=/dev/mapper/cryptsetupdev oflag=direct bs=1M
+ dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=4M count=4
+ cmp -n 16MB /tmp/cmpdata /tmp/testdata2
+
+ # FIXME: encryption-aware flatten/resize misbehave if proxied to
+ # RAW_DEV mapping (i.e. if the RAW_DEV mapping owns the lock)
+ # (acquire and) release the lock as a side effect
+ rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg
+
+ # check that encryption-aware resize compensates LUKS header overhead
+ (( $(sudo blockdev --getsize64 $LIBRBD_DEV) < (32 << 20) ))
+ expect_false rbd resize --size 32M testimg
+ rbd resize --size 32M --encryption-passphrase-file /tmp/passphrase testimg
+ (( $(sudo blockdev --getsize64 $LIBRBD_DEV) == (32 << 20) ))
+
+ _sudo rbd device unmap -t nbd $LIBRBD_DEV
+}
+
+function test_clone_encryption() {
+ clean_up_cryptsetup
+
+ # write 1MB plaintext
+ dd if=/tmp/testdata1 of=$RAW_DEV oflag=direct bs=1M count=1
+
+ # clone (luks1)
+ rbd snap create testimg@snap
+ rbd snap protect testimg@snap
+ rbd clone testimg@snap testimg1
+ rbd encryption format testimg1 luks1 /tmp/passphrase
+
+ # open encryption with librbd, write one more MB, close
+ LIBRBD_DEV=$(_sudo rbd -p rbd map testimg1 -t nbd -o encryption-format=luks1,encryption-passphrase-file=/tmp/passphrase)
+ sudo chmod 666 $LIBRBD_DEV
+ dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=1M count=1
+ cmp -n 1MB /tmp/cmpdata /tmp/testdata1
+ dd if=/tmp/testdata1 of=$LIBRBD_DEV seek=1 skip=1 oflag=direct bs=1M count=1
+ _sudo rbd device unmap -t nbd $LIBRBD_DEV
+
+ # second clone (luks2)
+ rbd snap create testimg1@snap
+ rbd snap protect testimg1@snap
+ rbd clone testimg1@snap testimg2
+ rbd encryption format testimg2 luks2 /tmp/passphrase2
+
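+ # note: two encryption format/passphrase option pairs are stacked in the
+ # map options below, child first (luks2/passphrase2) and then parent
+ # (luks1/passphrase), so librbd can decrypt every layer of the clone chain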
+ # open encryption with librbd, write one more MB, close
+ LIBRBD_DEV=$(_sudo rbd -p rbd map testimg2 -t nbd -o encryption-format=luks2,encryption-passphrase-file=/tmp/passphrase2,encryption-format=luks1,encryption-passphrase-file=/tmp/passphrase)
+ sudo chmod 666 $LIBRBD_DEV
+ dd if=$LIBRBD_DEV of=/tmp/cmpdata iflag=direct bs=1M count=2
+ cmp -n 2MB /tmp/cmpdata /tmp/testdata1
+ dd if=/tmp/testdata1 of=$LIBRBD_DEV seek=2 skip=2 oflag=direct bs=1M count=1
+ _sudo rbd device unmap -t nbd $LIBRBD_DEV
+
+ # flatten
+ expect_false rbd flatten testimg2 --encryption-format luks1 --encryption-format luks2 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
+ rbd flatten testimg2 --encryption-format luks2 --encryption-format luks1 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
+
+ # verify with cryptsetup
+ RAW_FLAT_DEV=$(_sudo rbd -p rbd map testimg2 -t nbd)
+ sudo cryptsetup open $RAW_FLAT_DEV --type luks cryptsetupdev -d /tmp/passphrase2
+ sudo chmod 666 /dev/mapper/cryptsetupdev
+ dd if=/dev/mapper/cryptsetupdev of=/tmp/cmpdata iflag=direct bs=1M count=3
+ cmp -n 3MB /tmp/cmpdata /tmp/testdata1
+ _sudo rbd device unmap -t nbd $RAW_FLAT_DEV
+}
+
+function test_clone_and_load_with_a_single_passphrase {
+ local expectedfail=$1
+
+ # clone and format
+ rbd snap create testimg@snap
+ rbd snap protect testimg@snap
+ rbd clone testimg@snap testimg1
+ rbd encryption format testimg1 luks2 /tmp/passphrase2
+
+ if [ "$expectedfail" = "true" ]
+ then
+ expect_false rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2
+ rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2 --encryption-passphrase-file /tmp/passphrase
+ else
+ rbd flatten testimg1 --encryption-passphrase-file /tmp/passphrase2
+ fi
+
+ rbd remove testimg1
+ rbd snap unprotect testimg@snap
+ rbd snap remove testimg@snap
+}
+
+function test_plaintext_detection {
+ # 16k LUKS header
+ sudo cryptsetup -q luksFormat --type luks2 --luks2-metadata-size 16k $RAW_DEV /tmp/passphrase
+ test_clone_and_load_with_a_single_passphrase true
+
+ # 4m LUKS header
+ sudo cryptsetup -q luksFormat --type luks2 --luks2-metadata-size 4m $RAW_DEV /tmp/passphrase
+ test_clone_and_load_with_a_single_passphrase true
+
+ # no luks header
+ dd if=/dev/zero of=$RAW_DEV oflag=direct bs=4M count=8
+ test_clone_and_load_with_a_single_passphrase false
+}
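+# (in the two LUKS-header cases above the parent's header was written
+# out-of-band with cryptsetup rather than via "rbd encryption format", so
+# flattening the clone with only the child's passphrase is expected to
+# fail; with a zeroed, plaintext parent it should succeed)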
+
+function get_nbd_device_paths {
+ rbd device list -t nbd | tail -n +2 | egrep "\s+rbd\s+testimg" | awk '{print $5;}'
+}
+
+function clean_up_cryptsetup() {
+ ls /dev/mapper/cryptsetupdev && sudo cryptsetup close cryptsetupdev || true
+}
+
+function clean_up {
+ sudo rm -f $TMP_FILES
+ clean_up_cryptsetup
+ for device in $(get_nbd_device_paths); do
+ _sudo rbd device unmap -t nbd $device
+ done
+
+ rbd remove testimg2 || true
+ rbd snap unprotect testimg1@snap || true
+ rbd snap remove testimg1@snap || true
+ rbd remove testimg1 || true
+ rbd snap unprotect testimg@snap || true
+ rbd snap remove testimg@snap || true
+ rbd remove testimg || true
+}
+
+if [[ $(uname) != "Linux" ]]; then
+ echo "LUKS encryption tests only supported on Linux"
+ exit 0
+fi
+
+
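+# feature bit 64 is journaling (RBD_FEATURE_JOURNALING)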
+if [[ $(($(ceph-conf --name client.${CEPH_ID} --show-config-value rbd_default_features) & 64)) != 0 ]]; then
+ echo "LUKS encryption tests not supported alongside image journaling feature"
+ exit 0
+fi
+
+clean_up
+
+trap clean_up INT TERM EXIT
+
+# generate test data
+dd if=/dev/urandom of=/tmp/testdata1 bs=4M count=4
+dd if=/dev/urandom of=/tmp/testdata2 bs=4M count=4
+
+# create passphrase files
+printf "pass\0word\n" > /tmp/passphrase
+printf "\t password2 " > /tmp/passphrase2
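+# (the passphrases contain a NUL byte, a tab and surrounding whitespace,
+# presumably to exercise passphrase parsing edge cases)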
+
+# create an image
+rbd create testimg --size=32M
+
+# map raw data to nbd device
+RAW_DEV=$(_sudo rbd -p rbd map testimg -t nbd)
+sudo chmod 666 $RAW_DEV
+
+test_plaintext_detection
+
+test_encryption_format luks1
+test_encryption_format luks2
+
+test_clone_encryption
+
+echo OK
diff --git a/qa/workunits/rbd/map-snapshot-io.sh b/qa/workunits/rbd/map-snapshot-io.sh
new file mode 100755
index 000000000..a69d84829
--- /dev/null
+++ b/qa/workunits/rbd/map-snapshot-io.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# http://tracker.ceph.com/issues/3964
+
+set -ex
+
+rbd create image -s 100
+DEV=$(sudo rbd map image)
+dd if=/dev/zero of=$DEV oflag=direct count=10
+rbd snap create image@s1
+dd if=/dev/zero of=$DEV oflag=direct count=10 # used to fail
+rbd snap rm image@s1
+dd if=/dev/zero of=$DEV oflag=direct count=10
+sudo rbd unmap $DEV
+rbd rm image
+
+echo OK
diff --git a/qa/workunits/rbd/map-unmap.sh b/qa/workunits/rbd/map-unmap.sh
new file mode 100755
index 000000000..99863849e
--- /dev/null
+++ b/qa/workunits/rbd/map-unmap.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+set -ex
+
+RUN_TIME=300 # approximate duration of run (seconds)
+
+[ $# -eq 1 ] && RUN_TIME="$1"
+
+IMAGE_NAME="image-$$"
+IMAGE_SIZE="1024" # MB
+
+function get_time() {
+ date '+%s'
+}
+
+function times_up() {
+ local end_time="$1"
+
+ test $(get_time) -ge "${end_time}"
+}
+
+function map_unmap() {
+ [ $# -eq 1 ] || exit 99
+ local image_name="$1"
+
+ local dev
+ dev="$(sudo rbd map "${image_name}")"
+ sudo rbd unmap "${dev}"
+}
+
+#### Start
+
+rbd create "${IMAGE_NAME}" --size="${IMAGE_SIZE}"
+
+COUNT=0
+START_TIME=$(get_time)
+END_TIME=$(expr $(get_time) + ${RUN_TIME})
+while ! times_up "${END_TIME}"; do
+ map_unmap "${IMAGE_NAME}"
+ COUNT=$(expr $COUNT + 1)
+done
+ELAPSED=$(expr "$(get_time)" - "${START_TIME}")
+
+rbd rm "${IMAGE_NAME}"
+
+echo "${COUNT} iterations completed in ${ELAPSED} seconds"
diff --git a/qa/workunits/rbd/merge_diff.sh b/qa/workunits/rbd/merge_diff.sh
new file mode 100755
index 000000000..eb8597304
--- /dev/null
+++ b/qa/workunits/rbd/merge_diff.sh
@@ -0,0 +1,477 @@
+#!/usr/bin/env bash
+set -ex
+
+export RBD_FORCE_ALLOW_V1=1
+
+pool=rbd
+gen=$pool/gen
+out=$pool/out
+testno=1
+
+mkdir -p merge_diff_test
+pushd merge_diff_test
+
+function expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+function clear_all()
+{
+ fusermount -u mnt || true
+
+ rbd snap purge --no-progress $gen || true
+ rbd rm --no-progress $gen || true
+ rbd snap purge --no-progress $out || true
+ rbd rm --no-progress $out || true
+
+ rm -rf diffs || true
+}
+
+function rebuild()
+{
+ clear_all
+ echo Starting test $testno
+ ((testno++))
+ if [[ "$2" -lt "$1" ]] && [[ "$3" -gt "1" ]]; then
+ rbd create $gen --size 100 --object-size $1 --stripe-unit $2 --stripe-count $3 --image-format $4
+ else
+ rbd create $gen --size 100 --object-size $1 --image-format $4
+ fi
+ rbd create $out --size 1 --object-size 524288
+ mkdir -p mnt diffs
+ # lttng has atexit handlers that need to be fork/clone aware
+ LD_PRELOAD=liblttng-ust-fork.so.0 rbd-fuse -p $pool mnt
+}
+
+function write()
+{
+ dd if=/dev/urandom of=mnt/gen bs=1M conv=notrunc seek=$1 count=$2
+}
+
+function snap()
+{
+ rbd snap create $gen@$1
+}
+
+function resize()
+{
+ rbd resize --no-progress $gen --size $1 --allow-shrink
+}
+
+function export_diff()
+{
+ if [ $2 == "head" ]; then
+ target="$gen"
+ else
+ target="$gen@$2"
+ fi
+ if [ $1 == "null" ]; then
+ rbd export-diff --no-progress $target diffs/$1.$2
+ else
+ rbd export-diff --no-progress $target --from-snap $1 diffs/$1.$2
+ fi
+}
+
+function merge_diff()
+{
+ rbd merge-diff diffs/$1.$2 diffs/$2.$3 diffs/$1.$3
+}
+
+function check()
+{
+ rbd import-diff --no-progress diffs/$1.$2 $out || return 1
+ if [ "$2" == "head" ]; then
+ sum1=`rbd export $gen - | md5sum`
+ else
+ sum1=`rbd export $gen@$2 - | md5sum`
+ fi
+ sum2=`rbd export $out - | md5sum`
+ if [ "$sum1" != "$sum2" ]; then
+ exit 1
+ fi
+ if [ "$2" != "head" ]; then
+ rbd snap ls $out | awk '{print $2}' | grep "^$2\$" || return 1
+ fi
+}
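+# each case below follows the same pattern (sketch):
+#   rebuild <object-size> <stripe-unit> <stripe-count> <image-format>
+#   write <offset-MB> <count-MB> ...; snap <name>; ...
+#   export_diff A B   # take incremental diffs into diffs/A.B
+#   merge_diff A B C  # merge diffs/A.B + diffs/B.C into diffs/A.C
+#   check A C         # import the merged diff and compare md5sums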
+
+# test from/to (f/t) header handling
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+export_diff null a
+export_diff a head
+merge_diff null a head
+check null head
+
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+snap b
+write 2 1
+export_diff null a
+export_diff a b
+export_diff b head
+merge_diff null a b
+check null b
+
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+snap b
+write 2 1
+export_diff null a
+export_diff a b
+export_diff b head
+merge_diff a b head
+check null a
+check a head
+
+rebuild 4194304 4194304 1 2
+write 0 1
+snap a
+write 1 1
+snap b
+write 2 1
+export_diff null a
+export_diff a b
+export_diff b head
+rbd merge-diff diffs/null.a diffs/a.b - | rbd merge-diff - diffs/b.head - > diffs/null.head
+check null head
+
+# data tests
+rebuild 4194304 4194304 1 2
+write 4 2
+snap s101
+write 0 3
+write 8 2
+snap s102
+export_diff null s101
+export_diff s101 s102
+merge_diff null s101 s102
+check null s102
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 2 5
+write 8 2
+snap s201
+write 0 2
+write 6 3
+snap s202
+export_diff null s201
+export_diff s201 s202
+merge_diff null s201 s202
+check null s202
+
+rebuild 4194304 4194304 1 2
+write 0 4
+write 12 6
+snap s301
+write 0 6
+write 10 5
+write 16 4
+snap s302
+export_diff null s301
+export_diff s301 s302
+merge_diff null s301 s302
+check null s302
+
+rebuild 4194304 4194304 1 2
+write 0 12
+write 14 2
+write 18 2
+snap s401
+write 1 2
+write 5 6
+write 13 3
+write 18 2
+snap s402
+export_diff null s401
+export_diff s401 s402
+merge_diff null s401 s402
+check null s402
+
+rebuild 4194304 4194304 1 2
+write 2 4
+write 10 12
+write 27 6
+write 36 4
+snap s501
+write 0 24
+write 28 4
+write 36 4
+snap s502
+export_diff null s501
+export_diff s501 s502
+merge_diff null s501 s502
+check null s502
+
+rebuild 4194304 4194304 1 2
+write 0 8
+resize 5
+snap r1
+resize 20
+write 12 8
+snap r2
+resize 8
+write 4 4
+snap r3
+export_diff null r1
+export_diff r1 r2
+export_diff r2 r3
+merge_diff null r1 r2
+merge_diff null r2 r3
+check null r3
+
+rebuild 4194304 4194304 1 2
+write 0 8
+resize 5
+snap r1
+resize 20
+write 12 8
+snap r2
+resize 8
+write 4 4
+snap r3
+resize 10
+snap r4
+export_diff null r1
+export_diff r1 r2
+export_diff r2 r3
+export_diff r3 r4
+merge_diff null r1 r2
+merge_diff null r2 r3
+merge_diff null r3 r4
+check null r4
+
+# merge diff doesn't yet support fancy striping
+# rebuild 4194304 65536 8 2
+# write 0 32
+# snap r1
+# write 16 32
+# snap r2
+# export_diff null r1
+# export_diff r1 r2
+# expect_false merge_diff null r1 r2
+
+rebuild 4194304 4194304 1 2
+write 0 1
+write 2 1
+write 4 1
+write 6 1
+snap s1
+write 1 1
+write 3 1
+write 5 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 1 1
+write 3 1
+write 5 1
+snap s1
+write 0 1
+write 2 1
+write 4 1
+write 6 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 1 1
+write 7 1
+write 13 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 0 1
+write 6 1
+write 12 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 2 1
+write 8 1
+write 14 1
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 1 1
+write 7 1
+write 13 1
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 1
+write 6 1
+write 12 1
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 2 1
+write 8 1
+write 14 1
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 3
+write 6 3
+write 12 3
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 2 4
+write 8 4
+write 14 4
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 4
+write 6 4
+write 12 4
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 6
+write 6 6
+write 12 6
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 3 6
+write 9 6
+write 15 6
+snap s1
+write 0 3
+write 6 3
+write 12 3
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 8
+snap s1
+resize 2
+resize 100
+snap s2
+export_diff null s1
+export_diff s1 s2
+merge_diff null s1 s2
+check null s2
+
+rebuild 4194304 4194304 1 2
+write 0 8
+snap s1
+resize 2
+resize 100
+snap s2
+write 20 2
+snap s3
+export_diff null s1
+export_diff s1 s2
+export_diff s2 s3
+merge_diff s1 s2 s3
+check null s1
+check s1 s3
+
+# TODO: add more test cases
+
+clear_all
+popd
+rm -rf merge_diff_test
+
+echo OK
diff --git a/qa/workunits/rbd/notify_master.sh b/qa/workunits/rbd/notify_master.sh
new file mode 100755
index 000000000..99ccd74db
--- /dev/null
+++ b/qa/workunits/rbd/notify_master.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -ex
+
+relpath=$(dirname $0)/../../../src/test/librbd
+python3 $relpath/test_notify.py master
+exit 0
diff --git a/qa/workunits/rbd/notify_slave.sh b/qa/workunits/rbd/notify_slave.sh
new file mode 100755
index 000000000..7f49a0c7d
--- /dev/null
+++ b/qa/workunits/rbd/notify_slave.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -ex
+
+relpath=$(dirname $0)/../../../src/test/librbd
+python3 $relpath/test_notify.py slave
+exit 0
diff --git a/qa/workunits/rbd/permissions.sh b/qa/workunits/rbd/permissions.sh
new file mode 100755
index 000000000..f8a9aaa71
--- /dev/null
+++ b/qa/workunits/rbd/permissions.sh
@@ -0,0 +1,269 @@
+#!/usr/bin/env bash
+set -ex
+
+IMAGE_FEATURES="layering,exclusive-lock,object-map,fast-diff"
+
+clone_v2_enabled() {
+ image_spec=$1
+ rbd info $image_spec | grep "clone-parent"
+}
+
+create_pools() {
+ ceph osd pool create images 32
+ rbd pool init images
+ ceph osd pool create volumes 32
+ rbd pool init volumes
+}
+
+delete_pools() {
+ (ceph osd pool delete images images --yes-i-really-really-mean-it || true) >/dev/null 2>&1
+ (ceph osd pool delete volumes volumes --yes-i-really-really-mean-it || true) >/dev/null 2>&1
+
+}
+
+recreate_pools() {
+ delete_pools
+ create_pools
+}
+
+delete_users() {
+ (ceph auth del client.volumes || true) >/dev/null 2>&1
+ (ceph auth del client.images || true) >/dev/null 2>&1
+
+ (ceph auth del client.snap_none || true) >/dev/null 2>&1
+ (ceph auth del client.snap_all || true) >/dev/null 2>&1
+ (ceph auth del client.snap_pool || true) >/dev/null 2>&1
+ (ceph auth del client.snap_profile_all || true) >/dev/null 2>&1
+ (ceph auth del client.snap_profile_pool || true) >/dev/null 2>&1
+
+ (ceph auth del client.mon_write || true) >/dev/null 2>&1
+}
+
+create_users() {
+ ceph auth get-or-create client.volumes \
+ mon 'profile rbd' \
+ osd 'profile rbd pool=volumes, profile rbd-read-only pool=images' \
+ mgr 'profile rbd pool=volumes, profile rbd-read-only pool=images' >> $KEYRING
+ ceph auth get-or-create client.images mon 'profile rbd' osd 'profile rbd pool=images' >> $KEYRING
+
+ ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
+ ceph auth get-or-create client.snap_all mon 'allow r' osd 'allow w' >> $KEYRING
+ ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING
+ ceph auth get-or-create client.snap_profile_all mon 'allow r' osd 'profile rbd' >> $KEYRING
+ ceph auth get-or-create client.snap_profile_pool mon 'allow r' osd 'profile rbd pool=images' >> $KEYRING
+
+ ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
+}
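+# client.volumes gets read/write caps on 'volumes' and read-only caps on
+# 'images'; the snap_* users compare raw "allow" caps against the rbd
+# profiles for self-managed snapshot operations (informal summary of the
+# caps above)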
+
+expect() {
+
+ set +e
+
+ local expected_ret=$1
+ local ret
+
+ shift
+ cmd=$@
+
+ eval $cmd
+ ret=$?
+
+ set -e
+
+ if [[ $ret -ne $expected_ret ]]; then
+ echo "ERROR: running \'$cmd\': expected $expected_ret got $ret"
+ return 1
+ fi
+
+ return 0
+}
+
+test_images_access() {
+ rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
+ rbd -k $KEYRING --id images snap create images/foo@snap
+ rbd -k $KEYRING --id images snap protect images/foo@snap
+ rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id images snap protect images/foo@snap
+ rbd -k $KEYRING --id images export images/foo@snap - >/dev/null
+ expect 16 rbd -k $KEYRING --id images snap rm images/foo@snap
+
+ rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
+
+ if ! clone_v2_enabled images/foo; then
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ fi
+
+ expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
+ expect 1 rbd -k $KEYRING --id images flatten volumes/child
+ rbd -k $KEYRING --id volumes flatten volumes/child
+ expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
+ rbd -k $KEYRING --id images snap unprotect images/foo@snap
+
+ expect 39 rbd -k $KEYRING --id images rm images/foo
+ rbd -k $KEYRING --id images snap rm images/foo@snap
+ rbd -k $KEYRING --id images rm images/foo
+ rbd -k $KEYRING --id volumes rm volumes/child
+}
+
+test_volumes_access() {
+ rbd -k $KEYRING --id images create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 images/foo
+ rbd -k $KEYRING --id images snap create images/foo@snap
+ rbd -k $KEYRING --id images snap protect images/foo@snap
+
+ # commands that work with read-only access
+ rbd -k $KEYRING --id volumes info images/foo@snap
+ rbd -k $KEYRING --id volumes snap ls images/foo
+ rbd -k $KEYRING --id volumes export images/foo - >/dev/null
+ rbd -k $KEYRING --id volumes cp images/foo volumes/foo_copy
+ rbd -k $KEYRING --id volumes rm volumes/foo_copy
+ rbd -k $KEYRING --id volumes children images/foo@snap
+ rbd -k $KEYRING --id volumes lock list images/foo
+
+ # commands that fail with read-only access
+ expect 1 rbd -k $KEYRING --id volumes resize -s 2 images/foo --allow-shrink
+ expect 1 rbd -k $KEYRING --id volumes snap create images/foo@2
+ expect 1 rbd -k $KEYRING --id volumes snap rollback images/foo@snap
+ expect 1 rbd -k $KEYRING --id volumes snap remove images/foo@snap
+ expect 1 rbd -k $KEYRING --id volumes snap purge images/foo
+ expect 1 rbd -k $KEYRING --id volumes snap unprotect images/foo@snap
+ expect 1 rbd -k $KEYRING --id volumes flatten images/foo
+ expect 1 rbd -k $KEYRING --id volumes lock add images/foo test
+ expect 1 rbd -k $KEYRING --id volumes lock remove images/foo test locker
+ expect 1 rbd -k $KEYRING --id volumes ls rbd
+
+ # create clone and snapshot
+ rbd -k $KEYRING --id volumes clone --image-feature $IMAGE_FEATURES images/foo@snap volumes/child
+ rbd -k $KEYRING --id volumes snap create volumes/child@snap1
+ rbd -k $KEYRING --id volumes snap protect volumes/child@snap1
+ rbd -k $KEYRING --id volumes snap create volumes/child@snap2
+
+ # make sure original snapshot stays protected
+ if clone_v2_enabled images/foo; then
+ rbd -k $KEYRING --id volumes flatten volumes/child
+ rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
+ rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
+ else
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id volumes flatten volumes/child
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ expect 2 rbd -k $KEYRING --id volumes snap rm volumes/child@snap2
+ rbd -k $KEYRING --id volumes snap unprotect volumes/child@snap1
+ expect 16 rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ fi
+
+ # clean up
+ rbd -k $KEYRING --id volumes snap rm volumes/child@snap1
+ rbd -k $KEYRING --id images snap unprotect images/foo@snap
+ rbd -k $KEYRING --id images snap rm images/foo@snap
+ rbd -k $KEYRING --id images rm images/foo
+ rbd -k $KEYRING --id volumes rm volumes/child
+}
+
+create_self_managed_snapshot() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+with rados.Rados(conffile="", rados_id="${ID}") as cluster:
+ ioctx = cluster.open_ioctx("${POOL}")
+
+ snap_id = ioctx.create_self_managed_snap()
+ print ("Created snap id {}".format(snap_id))
+EOF
+}
+
+remove_self_managed_snapshot() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python3
+import rados
+
+with rados.Rados(conffile="", rados_id="mon_write") as cluster1, \
+ rados.Rados(conffile="", rados_id="${ID}") as cluster2:
+ ioctx1 = cluster1.open_ioctx("${POOL}")
+
+ snap_id = ioctx1.create_self_managed_snap()
+ print ("Created snap id {}".format(snap_id))
+
+ ioctx2 = cluster2.open_ioctx("${POOL}")
+
+ ioctx2.remove_self_managed_snap(snap_id)
+ print ("Removed snap id {}".format(snap_id))
+EOF
+}
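+# (the snapshot is created by the privileged mon_write user so that only
+# the *removal* permission of the user under test is exercised)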
+
+test_remove_self_managed_snapshots() {
+ # Ensure users cannot create self-managed snapshots w/o permissions
+ expect 1 create_self_managed_snapshot snap_none images
+ expect 1 create_self_managed_snapshot snap_none volumes
+
+ create_self_managed_snapshot snap_all images
+ create_self_managed_snapshot snap_all volumes
+
+ create_self_managed_snapshot snap_pool images
+ expect 1 create_self_managed_snapshot snap_pool volumes
+
+ create_self_managed_snapshot snap_profile_all images
+ create_self_managed_snapshot snap_profile_all volumes
+
+ create_self_managed_snapshot snap_profile_pool images
+ expect 1 create_self_managed_snapshot snap_profile_pool volumes
+
+ # Ensure users cannot delete self-managed snapshots w/o permissions
+ expect 1 remove_self_managed_snapshot snap_none images
+ expect 1 remove_self_managed_snapshot snap_none volumes
+
+ remove_self_managed_snapshot snap_all images
+ remove_self_managed_snapshot snap_all volumes
+
+ remove_self_managed_snapshot snap_pool images
+ expect 1 remove_self_managed_snapshot snap_pool volumes
+
+ remove_self_managed_snapshot snap_profile_all images
+ remove_self_managed_snapshot snap_profile_all volumes
+
+ remove_self_managed_snapshot snap_profile_pool images
+ expect 1 remove_self_managed_snapshot snap_profile_pool volumes
+}
+
+test_rbd_support() {
+ # read-only commands should work on both pools
+ ceph -k $KEYRING --id volumes rbd perf image stats volumes
+ ceph -k $KEYRING --id volumes rbd perf image stats images
+
+ # read/write commands should only work on 'volumes'
+ rbd -k $KEYRING --id volumes create --image-format 2 --image-feature $IMAGE_FEATURES -s 1 volumes/foo
+ ceph -k $KEYRING --id volumes rbd task add remove volumes/foo
+ expect 13 ceph -k $KEYRING --id volumes rbd task add remove images/foo
+}
+
+cleanup() {
+ rm -f $KEYRING
+}
+
+KEYRING=$(mktemp)
+trap cleanup EXIT ERR HUP INT QUIT
+
+delete_users
+create_users
+
+recreate_pools
+test_images_access
+
+recreate_pools
+test_volumes_access
+
+test_remove_self_managed_snapshots
+
+test_rbd_support
+
+delete_pools
+delete_users
+
+echo OK
+exit 0
diff --git a/qa/workunits/rbd/qemu-iotests.sh b/qa/workunits/rbd/qemu-iotests.sh
new file mode 100755
index 000000000..a2e9e0600
--- /dev/null
+++ b/qa/workunits/rbd/qemu-iotests.sh
@@ -0,0 +1,47 @@
+#!/bin/sh -ex
+
+# Run qemu-iotests against rbd. These are block-level tests that go
+# through qemu but do not involve running a full vm. Note that these
+# require the admin ceph user, as there's no way to pass the ceph user
+# to qemu-iotests currently.
+
+testlist='001 002 003 004 005 008 009 010 011 021 025 032 033'
+
+git clone https://github.com/qemu/qemu.git
+cd qemu
+
+
+if grep -iqE '(bionic|focal|jammy|platform:el9)' /etc/os-release; then
+ git checkout v2.11.0
+elif grep -iqE '(xenial|platform:el8)' /etc/os-release; then
+ git checkout v2.3.0
+else
+ # use v2.2.0-rc3 (last released version that handles all the tests)
+ git checkout 2528043f1f299e0e88cb026f1ca7c40bbb4e1f80
+fi
+
+cd tests/qemu-iotests
+# qemu-iotests expects a binary called just 'qemu' to be available
+if [ -x '/usr/bin/qemu-system-x86_64' ]
+then
+ QEMU='/usr/bin/qemu-system-x86_64'
+else
+ QEMU='/usr/libexec/qemu-kvm'
+fi
+
+# Bionic (v2.11.0) tests expect all tools in current directory
+ln -s $QEMU qemu
+ln -s /usr/bin/qemu-img
+ln -s /usr/bin/qemu-io
+ln -s /usr/bin/qemu-nbd
+
+# this is normally generated by configure, but contains nothing but a
+# python binary definition, which we don't care about. For some reason
+# it is not present on trusty.
+touch common.env
+
+# TEST_DIR is the pool for rbd
+TEST_DIR=rbd ./check -rbd $testlist
+
+cd ../../..
+rm -rf qemu
diff --git a/qa/workunits/rbd/qemu_dynamic_features.sh b/qa/workunits/rbd/qemu_dynamic_features.sh
new file mode 100755
index 000000000..70e9fbb3c
--- /dev/null
+++ b/qa/workunits/rbd/qemu_dynamic_features.sh
@@ -0,0 +1,46 @@
+#!/usr/bin/env bash
+set -x
+
+if [[ -z "${IMAGE_NAME}" ]]; then
+ echo "image name must be provided"
+ exit 1
+fi
+
+is_qemu_running() {
+ rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
+}
+
+wait_for_qemu() {
+ while ! is_qemu_running ; do
+ echo "*** Waiting for QEMU"
+ sleep 30
+ done
+}
+
+wait_for_qemu
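+# start from a known state; disable in dependency order, since journaling
+# and object-map both require exclusive-lock (so exclusive-lock is
+# disabled last here and re-enabled first below)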
+rbd feature disable ${IMAGE_NAME} journaling
+rbd feature disable ${IMAGE_NAME} object-map
+rbd feature disable ${IMAGE_NAME} exclusive-lock
+
+while is_qemu_running ; do
+ echo "*** Enabling all features"
+ rbd feature enable ${IMAGE_NAME} exclusive-lock || break
+ rbd feature enable ${IMAGE_NAME} journaling || break
+ rbd feature enable ${IMAGE_NAME} object-map || break
+ if is_qemu_running ; then
+ sleep 60
+ fi
+
+ echo "*** Disabling all features"
+ rbd feature disable ${IMAGE_NAME} journaling || break
+ rbd feature disable ${IMAGE_NAME} object-map || break
+ rbd feature disable ${IMAGE_NAME} exclusive-lock || break
+ if is_qemu_running ; then
+ sleep 60
+ fi
+done
+
+if is_qemu_running ; then
+ echo "RBD command failed on alive QEMU"
+ exit 1
+fi
diff --git a/qa/workunits/rbd/qemu_rebuild_object_map.sh b/qa/workunits/rbd/qemu_rebuild_object_map.sh
new file mode 100755
index 000000000..2647dcdcd
--- /dev/null
+++ b/qa/workunits/rbd/qemu_rebuild_object_map.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+set -ex
+
+if [[ -z "${IMAGE_NAME}" ]]; then
+ echo "image name must be provided"
+ exit 1
+fi
+
+is_qemu_running() {
+ rbd status ${IMAGE_NAME} | grep -v "Watchers: none"
+}
+
+wait_for_qemu() {
+ while ! is_qemu_running ; do
+ echo "*** Waiting for QEMU"
+ sleep 30
+ done
+}
+
+wait_for_qemu
+rbd feature disable ${IMAGE_NAME} journaling || true
+rbd feature disable ${IMAGE_NAME} fast-diff || true
+rbd feature disable ${IMAGE_NAME} object-map || true
+rbd feature disable ${IMAGE_NAME} exclusive-lock || true
+
+rbd feature enable ${IMAGE_NAME} exclusive-lock
+rbd feature enable ${IMAGE_NAME} object-map
+
+while is_qemu_running ; do
+ echo "*** Rebuilding object map"
+ rbd object-map rebuild ${IMAGE_NAME}
+
+ if is_qemu_running ; then
+ sleep 60
+ fi
+done
+
diff --git a/qa/workunits/rbd/qos.sh b/qa/workunits/rbd/qos.sh
new file mode 100755
index 000000000..feb1d5144
--- /dev/null
+++ b/qa/workunits/rbd/qos.sh
@@ -0,0 +1,90 @@
+#!/bin/sh -ex
+
+POOL=rbd
+IMAGE=test$$
+IMAGE_SIZE=1G
+TOLERANCE_PRCNT=10
+
+rbd_bench() {
+ local image=$1
+ local type=$2
+ local total=$3
+ local qos_type=$4
+ local qos_limit=$5
+ local iops_var_name=$6
+ local bps_var_name=$7
+ local timeout=$8
+ local timeout_cmd=""
+
+ if [ -n "${timeout}" ]; then
+ timeout_cmd="timeout --preserve-status ${timeout}"
+ fi
+
+ # parse `rbd bench` output for string like this:
+ # elapsed: 25 ops: 2560 ops/sec: 100.08 bytes/sec: 409.13 MiB
+ iops_bps=$(${timeout_cmd} rbd bench "${image}" \
+ --io-type ${type} --io-size 4K \
+ --io-total ${total} --rbd-cache=false \
+ --rbd_qos_${qos_type}_limit ${qos_limit} |
+ awk '/elapsed:.* GiB/ {print int($6) ":" int($8) * 1024 * 1024 * 1024}
+ /elapsed:.* MiB/ {print int($6) ":" int($8) * 1024 * 1024}
+ /elapsed:.* KiB/ {print int($6) ":" int($8) * 1024}
+ /elapsed:.* B/ {print int($6) ":" int($8)}')
+ eval ${iops_var_name}=${iops_bps%:*}
+ eval ${bps_var_name}=${iops_bps#*:}
+}
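+# rbd_bench returns its results by reference: the caller passes variable
+# *names* in iops_var_name/bps_var_name and the evals above assign into
+# them, e.g. (illustrative):
+#   rbd_bench "${POOL}/${IMAGE}" write ${IMAGE_SIZE} iops 0 iops bps 60
+#   echo "baseline: ${iops} IOPS, ${bps} B/s"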
+
+rbd create "${POOL}/${IMAGE}" -s ${IMAGE_SIZE}
+rbd bench "${POOL}/${IMAGE}" --io-type write --io-size 4M --io-total ${IMAGE_SIZE}
+
+rbd_bench "${POOL}/${IMAGE}" write ${IMAGE_SIZE} iops 0 iops bps 60
+iops_unlimited=$iops
+bps_unlimited=$bps
+
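+# if the unlimited baseline is too low the limit checks below would be
+# meaningless, so skip them rather than fail on a slow test environment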
+test "${iops_unlimited}" -ge 20 || exit 0
+
+io_total=$((bps_unlimited * 30))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} write_iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" write ${io_total} write_bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops 0 iops bps
+iops_unlimited=$iops
+bps_unlimited=$bps
+
+test "${iops_unlimited}" -ge 20 || exit 0
+
+io_total=$((bps_unlimited * 30))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} read_iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} read_bps $((bps_unlimited / 2)) iops bps
+test "${bps}" -le $((bps_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+# test a config override is applied
+rbd config image set "${POOL}/${IMAGE}" rbd_qos_iops_limit $((iops_unlimited / 4))
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 4 * (100 + TOLERANCE_PRCNT) / 100))
+rbd config image remove "${POOL}/${IMAGE}" rbd_qos_iops_limit
+rbd_bench "${POOL}/${IMAGE}" read ${io_total} iops $((iops_unlimited / 2)) iops bps
+test "${iops}" -le $((iops_unlimited / 2 * (100 + TOLERANCE_PRCNT) / 100))
+
+rbd rm "${POOL}/${IMAGE}"
+
+echo OK
diff --git a/qa/workunits/rbd/rbd-ggate.sh b/qa/workunits/rbd/rbd-ggate.sh
new file mode 100755
index 000000000..1bf89da38
--- /dev/null
+++ b/qa/workunits/rbd/rbd-ggate.sh
@@ -0,0 +1,239 @@
+#!/bin/sh -ex
+
+POOL=testrbdggate$$
+NS=ns
+IMAGE=test
+SIZE=64
+DATA=
+DEV=
+
+if which xmlstarlet > /dev/null 2>&1; then
+ XMLSTARLET=xmlstarlet
+elif which xml > /dev/null 2>&1; then
+ XMLSTARLET=xml
+else
+ echo "Missing xmlstarlet binary!"
+ exit 1
+fi
+
+if [ `uname -K` -ge 1200078 ] ; then
+ RBD_GGATE_RESIZE_SUPPORTED=1
+fi
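+# uname -K prints the FreeBSD version number (osreldate); releases from
+# 1200078 on are assumed to support online resize of ggate devices
+# (inferred from the variable name above)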
+
+_sudo()
+{
+ local cmd
+
+ if [ `id -u` -eq 0 ]
+ then
+ "$@"
+ return $?
+ fi
+
+ # Look for the command in the user's path. If that fails, run it
+ # as is, assuming it is in sudo's path.
+ cmd=`which $1 2>/dev/null` || cmd=$1
+ shift
+ sudo -nE "${cmd}" "$@"
+}
+
+check_geom_gate()
+{
+ # See if geom_gate is loaded, or can be loaded.
+ # Otherwise the tests cannot run.
+ if ! kldstat -q -n geom_gate ; then
+ # See if we can load it
+ if ! _sudo kldload geom_gate ; then
+ echo Not able to load geom_gate
+ echo check /var/log/messages as to why
+ exit 1
+ fi
+ fi
+}
+
+setup()
+{
+ local ns x
+
+ if [ -e CMakeCache.txt ]; then
+ # running under cmake build dir
+
+ CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
+ CEPH_ROOT=${PWD}
+ CEPH_BIN=${CEPH_ROOT}/bin
+
+ export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
+ export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
+ PATH=${CEPH_BIN}:${PATH}
+ fi
+
+ _sudo echo test sudo
+ check_geom_gate
+
+ trap cleanup INT TERM EXIT
+ TEMPDIR=`mktemp -d`
+ DATA=${TEMPDIR}/data
+ dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+ ceph osd pool create ${POOL} 32
+
+ rbd namespace create ${POOL}/${NS}
+ for ns in '' ${NS}; do
+ rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
+ ${DATA} ${IMAGE}
+ done
+}
+
+cleanup()
+{
+ local ns s
+
+ set +e
+ rm -Rf ${TEMPDIR}
+ if [ -n "${DEV}" ]
+ then
+ _sudo rbd-ggate unmap ${DEV}
+ fi
+
+ ceph osd pool delete ${POOL} ${POOL} --yes-i-really-really-mean-it
+}
+
+expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+#
+# main
+#
+
+setup
+
+echo exit status test
+expect_false rbd-ggate
+expect_false rbd-ggate INVALIDCMD
+if [ `id -u` -ne 0 ]
+then
+ expect_false rbd-ggate map ${IMAGE}
+fi
+expect_false _sudo rbd-ggate map INVALIDIMAGE
+
+echo map test using the first unused device
+DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
+rbd-ggate list | grep " ${DEV} *$"
+
+echo map test specifying the device
+expect_false _sudo rbd-ggate --device ${DEV} map ${POOL}/${IMAGE}
+dev1=${DEV}
+_sudo rbd-ggate unmap ${DEV}
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+# XXX: race possible when the device is reused by another process
+DEV=`_sudo rbd-ggate --device ${dev1} map ${POOL}/${IMAGE}`
+[ "${DEV}" = "${dev1}" ]
+rbd-ggate list | grep " ${DEV} *$"
+
+echo list format test
+expect_false _sudo rbd-ggate --format INVALID list
+rbd-ggate --format json --pretty-format list
+rbd-ggate --format xml list
+
+echo read test
+[ "`dd if=${DATA} bs=1M | md5`" = "`_sudo dd if=${DEV} bs=1M | md5`" ]
+
+echo write test
+dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+_sudo dd if=${DATA} of=${DEV} bs=1M
+_sudo sync
+[ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
+
+echo trim test
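+# a freshly imported image is fully allocated (used == provisioned);
+# newfs should issue BIO_DELETE (trim) requests through the ggate device,
+# deallocating objects so that used drops below provisioned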
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+_sudo newfs -E ${DEV}
+_sudo sync
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -lt "${provisioned}" ]
+
+echo resize test
+devname=$(basename ${DEV})
+size=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
+test -n "${size}"
+rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
+rbd info ${POOL}/${IMAGE}
+if [ -z "$RBD_GGATE_RESIZE_SUPPORTED" ]; then
+ # when resizing is not supported:
+ # resizing the underlying image for a GEOM ggate will stop the
+ # ggate process servicing the device. So we can resize and test
+ # the disappearance of the device
+ rbd-ggate list | expect_false grep " ${DEV} *$"
+else
+ rbd-ggate list | grep " ${DEV} *$"
+ size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
+ test -n "${size2}"
+ test ${size2} -eq $((size * 2))
+ dd if=/dev/urandom of=${DATA} bs=1M count=$((SIZE * 2))
+ _sudo dd if=${DATA} of=${DEV} bs=1M
+ _sudo sync
+ [ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
+ rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
+ rbd info ${POOL}/${IMAGE}
+ size2=$(geom gate list ${devname} | awk '$1 ~ /Mediasize:/ {print $2}')
+ test -n "${size2}"
+ test ${size2} -eq ${size}
+ truncate -s ${SIZE}M ${DATA}
+ [ "`dd if=${DATA} bs=1M | md5`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5`" ]
+ _sudo rbd-ggate unmap ${DEV}
+fi
+DEV=
+
+echo read-only option test
+DEV=`_sudo rbd-ggate map --read-only ${POOL}/${IMAGE}`
+devname=$(basename ${DEV})
+rbd-ggate list | grep " ${DEV} *$"
+access=$(geom gate list ${devname} | awk '$1 == "access:" {print $2}')
+test "${access}" = "read-only"
+_sudo dd if=${DEV} of=/dev/null bs=1M
+expect_false _sudo dd if=${DATA} of=${DEV} bs=1M
+_sudo rbd-ggate unmap ${DEV}
+
+echo exclusive option test
+DEV=`_sudo rbd-ggate map --exclusive ${POOL}/${IMAGE}`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo dd if=${DATA} of=${DEV} bs=1M
+_sudo sync
+expect_false timeout 10 \
+ rbd -p ${POOL} bench ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
+_sudo rbd-ggate unmap ${DEV}
+DEV=
+rbd bench -p ${POOL} ${IMAGE} --io-type=write --io-size=1024 --io-total=1024
+
+echo unmap by image name test
+DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo rbd-ggate unmap "${POOL}/${IMAGE}"
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+
+echo map/unmap snap test
+rbd snap create ${POOL}/${IMAGE}@snap
+DEV=`_sudo rbd-ggate map ${POOL}/${IMAGE}@snap`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo rbd-ggate unmap "${POOL}/${IMAGE}@snap"
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+
+echo map/unmap namespace test
+rbd snap create ${POOL}/${NS}/${IMAGE}@snap
+DEV=`_sudo rbd-ggate map ${POOL}/${NS}/${IMAGE}@snap`
+rbd-ggate list | grep " ${DEV} *$"
+_sudo rbd-ggate unmap "${POOL}/${NS}/${IMAGE}@snap"
+rbd-ggate list | expect_false grep " ${DEV} *$"
+DEV=
+
+echo OK
diff --git a/qa/workunits/rbd/rbd-nbd.sh b/qa/workunits/rbd/rbd-nbd.sh
new file mode 100755
index 000000000..bc89e9be5
--- /dev/null
+++ b/qa/workunits/rbd/rbd-nbd.sh
@@ -0,0 +1,500 @@
+#!/usr/bin/env bash
+set -ex
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+POOL=rbd
+ANOTHER_POOL=new_default_pool$$
+NS=ns
+IMAGE=testrbdnbd$$
+SIZE=64
+DATA=
+DEV=
+
+_sudo()
+{
+ local cmd
+
+ if [ `id -u` -eq 0 ]
+ then
+ "$@"
+ return $?
+ fi
+
+ # Look for the command in the user's path. If that fails, run it
+ # as is, assuming it is in sudo's path.
+ cmd=`which $1 2>/dev/null` || cmd=$1
+ shift
+ sudo -nE "${cmd}" "$@"
+}
+
+setup()
+{
+ local ns x
+
+ if [ -e CMakeCache.txt ]; then
+ # running under cmake build dir
+
+ CEPH_SRC=$(readlink -f $(dirname $0)/../../../src)
+ CEPH_ROOT=${PWD}
+ CEPH_BIN=${CEPH_ROOT}/bin
+
+ export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
+ export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
+ PATH=${CEPH_BIN}:${PATH}
+ fi
+
+ _sudo echo test sudo
+
+ trap cleanup INT TERM EXIT
+ TEMPDIR=`mktemp -d`
+ DATA=${TEMPDIR}/data
+ dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+
+ rbd namespace create ${POOL}/${NS}
+
+ for ns in '' ${NS}; do
+ rbd --dest-pool ${POOL} --dest-namespace "${ns}" --no-progress import \
+ ${DATA} ${IMAGE}
+ done
+
+ # create another pool
+ ceph osd pool create ${ANOTHER_POOL} 8
+ rbd pool init ${ANOTHER_POOL}
+}
+
+function cleanup()
+{
+ local ns s
+
+ set +e
+
+ mount | fgrep ${TEMPDIR}/mnt && _sudo umount -f ${TEMPDIR}/mnt
+
+ rm -Rf ${TEMPDIR}
+ if [ -n "${DEV}" ]
+ then
+ _sudo rbd device --device-type nbd unmap ${DEV}
+ fi
+
+ for ns in '' ${NS}; do
+ if rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} 2>/dev/null; then
+ for s in 0.5 1 2 4 8 16 32; do
+ sleep $s
+ rbd -p ${POOL} --namespace "${ns}" status ${IMAGE} |
+ grep 'Watchers: none' && break
+ done
+ rbd -p ${POOL} --namespace "${ns}" snap purge ${IMAGE}
+ rbd -p ${POOL} --namespace "${ns}" remove ${IMAGE}
+ fi
+ done
+ rbd namespace remove ${POOL}/${NS}
+
+ # cleanup/reset default pool
+ rbd config global rm global rbd_default_pool
+ ceph osd pool delete ${ANOTHER_POOL} ${ANOTHER_POOL} --yes-i-really-really-mean-it
+}
+
+function expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+function get_pid()
+{
+ local pool=$1
+ local ns=$2
+
+ PID=$(rbd device --device-type nbd --format xml list | $XMLSTARLET sel -t -v \
+ "//devices/device[pool='${pool}'][namespace='${ns}'][image='${IMAGE}'][device='${DEV}']/id")
+ test -n "${PID}" || return 1
+ ps -p ${PID} -C rbd-nbd
+}
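+# get_pid stores the rbd-nbd daemon's PID in the global PID variable;
+# $XMLSTARLET is presumed to be set by ceph-helpers.sh sourced above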
+
+unmap_device()
+{
+ local args=$1
+ local pid=$2
+
+ _sudo rbd device --device-type nbd unmap ${args}
+ rbd device --device-type nbd list | expect_false grep "^${pid}\\b" || return 1
+ ps -C rbd-nbd | expect_false grep "^ *${pid}\\b" || return 1
+
+ # workaround possible race between unmap and following map
+ sleep 0.5
+}
+
+#
+# main
+#
+
+setup
+
+# exit status test
+expect_false rbd-nbd
+expect_false rbd-nbd INVALIDCMD
+if [ `id -u` -ne 0 ]
+then
+ expect_false rbd device --device-type nbd map ${IMAGE}
+fi
+expect_false _sudo rbd device --device-type nbd map INVALIDIMAGE
+expect_false _sudo rbd-nbd --device INVALIDDEV map ${IMAGE}
+
+# list format test
+expect_false rbd device --device-type nbd --format INVALID list
+rbd device --device-type nbd --format json --pretty-format list
+rbd device --device-type nbd --format xml list
+
+# map test using the first unused device
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+# map test specifying the device
+expect_false _sudo rbd-nbd --device ${DEV} map ${POOL}/${IMAGE}
+dev1=${DEV}
+unmap_device ${DEV} ${PID}
+DEV=
+# XXX: race possible when the device is reused by another process
+DEV=`_sudo rbd-nbd --device ${dev1} map ${POOL}/${IMAGE}`
+[ "${DEV}" = "${dev1}" ]
+rbd device --device-type nbd list | grep "${IMAGE}"
+get_pid ${POOL}
+
+# read test
+[ "`dd if=${DATA} bs=1M | md5sum`" = "`_sudo dd if=${DEV} bs=1M | md5sum`" ]
+
+# write test
+dd if=/dev/urandom of=${DATA} bs=1M count=${SIZE}
+_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
+[ "`dd if=${DATA} bs=1M | md5sum`" = "`rbd -p ${POOL} --no-progress export ${IMAGE} - | md5sum`" ]
+unmap_device ${DEV} ${PID}
+
+# notrim test
+DEV=`_sudo rbd device --device-type nbd --options notrim map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+# the discard should fail because the device was mapped with notrim
+expect_false _sudo blkdiscard ${DEV}
+sync
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+unmap_device ${DEV} ${PID}
+
+# trim test
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -eq "${provisioned}" ]
+# the discard should be honored because trim is enabled by default at map time
+_sudo blkdiscard ${DEV}
+sync
+provisioned=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/provisioned_size" -v .`
+used=`rbd -p ${POOL} --format xml du ${IMAGE} |
+ $XMLSTARLET sel -t -m "//stats/images/image/used_size" -v .`
+[ "${used}" -lt "${provisioned}" ]
+
+# resize test
+devname=$(basename ${DEV})
+blocks=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
+test -n "${blocks}"
+rbd resize ${POOL}/${IMAGE} --size $((SIZE * 2))M
+rbd info ${POOL}/${IMAGE}
+blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
+test -n "${blocks2}"
+test ${blocks2} -eq $((blocks * 2))
+rbd resize ${POOL}/${IMAGE} --allow-shrink --size ${SIZE}M
+blocks2=$(awk -v dev=${devname} '$4 == dev {print $3}' /proc/partitions)
+test -n "${blocks2}"
+test ${blocks2} -eq ${blocks}
+
+# read-only option test
+unmap_device ${DEV} ${PID}
+DEV=`_sudo rbd --device-type nbd map --read-only ${POOL}/${IMAGE}`
+PID=$(rbd device --device-type nbd list | awk -v pool=${POOL} -v img=${IMAGE} -v dev=${DEV} \
+ '$2 == pool && $3 == img && $5 == dev {print $1}')
+test -n "${PID}"
+ps -p ${PID} -C rbd-nbd
+
+_sudo dd if=${DEV} of=/dev/null bs=1M
+expect_false _sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
+unmap_device ${DEV} ${PID}
+
+# exclusive option test
+DEV=`_sudo rbd --device-type nbd map --exclusive ${POOL}/${IMAGE}`
+get_pid ${POOL}
+
+_sudo dd if=${DATA} of=${DEV} bs=1M oflag=direct
+expect_false timeout 10 \
+ rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
+unmap_device ${DEV} ${PID}
+DEV=
+rbd bench ${IMAGE} --io-type write --io-size=1024 --io-total=1024
+
+# unmap by image name test
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+unmap_device ${IMAGE} ${PID}
+DEV=
+
+# map/unmap snap test
+rbd snap create ${POOL}/${IMAGE}@snap
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}@snap`
+get_pid ${POOL}
+unmap_device "${IMAGE}@snap" ${PID}
+DEV=
+
+# map/unmap snap test with --snap-id
+SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
+DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
+get_pid ${POOL}
+unmap_device "--snap-id ${SNAPID} ${IMAGE}" ${PID}
+DEV=
+
+# map/unmap namespace test
+rbd snap create ${POOL}/${NS}/${IMAGE}@snap
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}@snap`
+get_pid ${POOL} ${NS}
+unmap_device "${POOL}/${NS}/${IMAGE}@snap" ${PID}
+DEV=
+
+# map/unmap namespace test with --snap-id
+SNAPID=`rbd snap ls ${POOL}/${NS}/${IMAGE} | awk '$2 == "snap" {print $1}'`
+DEV=`_sudo rbd device --device-type nbd map --snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}`
+get_pid ${POOL} ${NS}
+unmap_device "--snap-id ${SNAPID} ${POOL}/${NS}/${IMAGE}" ${PID}
+DEV=
+
+# map/unmap namespace using options test
+DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE}`
+get_pid ${POOL} ${NS}
+unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE}" ${PID}
+DEV=`_sudo rbd device --device-type nbd map --pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap`
+get_pid ${POOL} ${NS}
+unmap_device "--pool ${POOL} --namespace ${NS} --image ${IMAGE} --snap snap" ${PID}
+DEV=
+
+# unmap by image name test 2
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+pid=$PID
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${NS}/${IMAGE}`
+get_pid ${POOL} ${NS}
+unmap_device ${POOL}/${NS}/${IMAGE} ${PID}
+DEV=
+unmap_device ${POOL}/${IMAGE} ${pid}
+
+# map/unmap test with just image name and expect image to come from default pool
+if [ "${POOL}" = "rbd" ];then
+ DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
+ get_pid ${POOL}
+ unmap_device ${IMAGE} ${PID}
+ DEV=
+fi
+
+# map/unmap test with just image name after changing default pool
+rbd config global set global rbd_default_pool ${ANOTHER_POOL}
+rbd create --size 10M ${IMAGE}
+DEV=`_sudo rbd device --device-type nbd map ${IMAGE}`
+get_pid ${ANOTHER_POOL}
+unmap_device ${IMAGE} ${PID}
+DEV=
+
+# reset
+rbd config global rm global rbd_default_pool
+
+# auto unmap test
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+_sudo kill ${PID}
+for i in `seq 10`; do
+ rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}" && break
+ sleep 1
+done
+rbd device --device-type nbd list | expect_false grep "^${PID} *${POOL} *${IMAGE}"
+
+# quiesce test
+QUIESCE_HOOK=${TEMPDIR}/quiesce.sh
+DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} ${POOL}/${IMAGE}`
+get_pid ${POOL}
+
+# test that snap create fails if the hook does not exist
+test ! -e ${QUIESCE_HOOK}
+expect_false rbd snap create ${POOL}/${IMAGE}@quiesce1
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+
+# test the hook is executed
+touch ${QUIESCE_HOOK}
+chmod +x ${QUIESCE_HOOK}
+cat > ${QUIESCE_HOOK} <<EOF
+#!/bin/sh
+echo "test the hook is executed" >&2
+echo \$1 > ${TEMPDIR}/\$2
+EOF
+rbd snap create ${POOL}/${IMAGE}@quiesce1
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+test "$(cat ${TEMPDIR}/quiesce)" = ${DEV}
+test "$(cat ${TEMPDIR}/unquiesce)" = ${DEV}
+
+# test snap create fails if the hook fails
+touch ${QUIESCE_HOOK}
+chmod +x ${QUIESCE_HOOK}
+cat > ${QUIESCE_HOOK} <<EOF
+#!/bin/sh
+echo "test snap create fails if the hook fails" >&2
+exit 22
+EOF
+expect_false rbd snap create ${POOL}/${IMAGE}@quiesce2
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+
+# test that a slow hook (7 second sleep) still allows snap create to succeed
+cat > ${QUIESCE_HOOK} <<EOF
+#!/bin/sh
+echo "test the hook is slow" >&2
+sleep 7
+EOF
+rbd snap create ${POOL}/${IMAGE}@quiesce2
+_sudo dd if=${DATA} of=${DEV} bs=1M count=1 oflag=direct
+
+# test rbd-nbd_quiesce hook that comes with distribution
+unmap_device ${DEV} ${PID}
+LOG_FILE=${TEMPDIR}/rbd-nbd.log
+if [ -n "${CEPH_SRC}" ]; then
+ QUIESCE_HOOK=${CEPH_SRC}/tools/rbd_nbd/rbd-nbd_quiesce
+ DEV=`_sudo rbd device --device-type nbd map --quiesce --quiesce-hook ${QUIESCE_HOOK} \
+ ${POOL}/${IMAGE} --log-file=${LOG_FILE}`
+else
+ DEV=`_sudo rbd device --device-type nbd map --quiesce ${POOL}/${IMAGE} --log-file=${LOG_FILE}`
+fi
+get_pid ${POOL}
+_sudo mkfs ${DEV}
+mkdir ${TEMPDIR}/mnt
+_sudo mount ${DEV} ${TEMPDIR}/mnt
+rbd snap create ${POOL}/${IMAGE}@quiesce3
+_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test bs=1M count=1 oflag=direct
+_sudo umount ${TEMPDIR}/mnt
+unmap_device ${DEV} ${PID}
+DEV=
+cat ${LOG_FILE}
+expect_false grep 'quiesce failed' ${LOG_FILE}
+
+# test detach/attach
+OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map ${POOL}/${IMAGE}`
+read DEV COOKIE <<< "${OUT}"
+get_pid ${POOL}
+_sudo mount ${DEV} ${TEMPDIR}/mnt
+_sudo rbd device detach ${POOL}/${IMAGE} --device-type nbd
+expect_false get_pid ${POOL}
+expect_false _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd
+if [ -n "${COOKIE}" ]; then
+ _sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
+else
+ _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
+fi
+get_pid ${POOL}
+_sudo rbd device detach ${DEV} --device-type nbd
+expect_false get_pid ${POOL}
+if [ -n "${COOKIE}" ]; then
+ _sudo rbd device attach --device ${DEV} --cookie ${COOKIE} ${POOL}/${IMAGE} --device-type nbd
+else
+ _sudo rbd device attach --device ${DEV} ${POOL}/${IMAGE} --device-type nbd --force
+fi
+get_pid ${POOL}
+ls ${TEMPDIR}/mnt/
+dd if=${TEMPDIR}/mnt/test of=/dev/null bs=1M count=1
+_sudo dd if=${DATA} of=${TEMPDIR}/mnt/test1 bs=1M count=1 oflag=direct
+_sudo umount ${TEMPDIR}/mnt
+unmap_device ${DEV} ${PID}
+# if kernel supports cookies
+if [ -n "${COOKIE}" ]; then
+ OUT=`_sudo rbd device --device-type nbd --show-cookie --cookie "abc de" --options try-netlink map ${POOL}/${IMAGE}`
+ read DEV ANOTHER_COOKIE <<< "${OUT}"
+ get_pid ${POOL}
+ test "${ANOTHER_COOKIE}" = "abc de"
+ unmap_device ${DEV} ${PID}
+fi
+DEV=
+
+# test detach/attach with --snap-id
+SNAPID=`rbd snap ls ${POOL}/${IMAGE} | awk '$2 == "snap" {print $1}'`
+OUT=`_sudo rbd device --device-type nbd --options try-netlink,show-cookie map --snap-id ${SNAPID} ${POOL}/${IMAGE}`
+read DEV COOKIE <<< "${OUT}"
+get_pid ${POOL}
+_sudo rbd device detach ${POOL}/${IMAGE} --snap-id ${SNAPID} --device-type nbd
+expect_false get_pid ${POOL}
+expect_false _sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
+if [ -n "${COOKIE}" ]; then
+ _sudo rbd device attach --device ${DEV} --cookie ${COOKIE} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd
+else
+ _sudo rbd device attach --device ${DEV} --snap-id ${SNAPID} ${POOL}/${IMAGE} --device-type nbd --force
+fi
+get_pid ${POOL}
+_sudo rbd device detach ${DEV} --device-type nbd
+expect_false get_pid ${POOL}
+DEV=
+
+# test discard granularity with journaling
+rbd config image set ${POOL}/${IMAGE} rbd_discard_granularity_bytes 4096
+rbd feature enable ${POOL}/${IMAGE} journaling
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+# since a discard will now be pruned to only whole blocks (0..4095, 4096..8191)
+# let us test all the cases around those alignments. 512 is the smallest
+# possible block blkdiscard allows us to use. Thus the test checks
+# 512 before, on the alignment, 512 after.
+_sudo blkdiscard --offset 0 --length $((4096-512)) ${DEV}
+_sudo blkdiscard --offset 0 --length 4096 ${DEV}
+_sudo blkdiscard --offset 0 --length $((4096+512)) ${DEV}
+_sudo blkdiscard --offset 512 --length $((8192-1024)) ${DEV}
+_sudo blkdiscard --offset 512 --length $((8192-512)) ${DEV}
+_sudo blkdiscard --offset 512 --length 8192 ${DEV}
+# wait for commit log to be empty, 10 seconds should be well enough
+tries=0
+queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
+while [ ${tries} -lt 10 ] && [ ${queue_length} -gt 0 ]; do
+ rbd journal inspect --pool ${POOL} --image ${IMAGE} --verbose
+ sleep 1
+ queue_length=`rbd journal inspect --pool ${POOL} --image ${IMAGE} | awk '/entries inspected/ {print $1}'`
+ tries=$((tries+1))
+done
+[ ${queue_length} -eq 0 ]
+unmap_device ${DEV} ${PID}
+DEV=
+rbd feature disable ${POOL}/${IMAGE} journaling
+rbd config image rm ${POOL}/${IMAGE} rbd_discard_granularity_bytes
+
+# test that disabling a feature so that the op is proxied to rbd-nbd
+# (arranged here by blkdiscard before "rbd feature disable") doesn't hang
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+rbd feature enable ${POOL}/${IMAGE} journaling
+_sudo blkdiscard --offset 0 --length 4096 ${DEV}
+rbd feature disable ${POOL}/${IMAGE} journaling
+unmap_device ${DEV} ${PID}
+DEV=
+
+# test that rbd_op_threads setting takes effect
+EXPECTED=`ceph-conf --show-config-value librados_thread_count`
+DEV=`_sudo rbd device --device-type nbd map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
+[ ${ACTUAL} -eq ${EXPECTED} ]
+unmap_device ${DEV} ${PID}
+EXPECTED=$((EXPECTED * 3 + 1))
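+# any value different from the default would do here; *3+1 merely
+# guarantees the two runs use different thread counts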
+DEV=`_sudo rbd device --device-type nbd --rbd-op-threads ${EXPECTED} map ${POOL}/${IMAGE}`
+get_pid ${POOL}
+ACTUAL=`ps -p ${PID} -T | grep -c io_context_pool`
+[ ${ACTUAL} -eq ${EXPECTED} ]
+unmap_device ${DEV} ${PID}
+DEV=
+
+echo OK
diff --git a/qa/workunits/rbd/rbd_groups.sh b/qa/workunits/rbd/rbd_groups.sh
new file mode 100755
index 000000000..a32618484
--- /dev/null
+++ b/qa/workunits/rbd/rbd_groups.sh
@@ -0,0 +1,258 @@
+#!/usr/bin/env bash
+
+set -ex
+
+#
+# rbd_groups.sh - test consistency group CLI commands
+#
+
+#
+# Functions
+#
+
+create_group()
+{
+ local group_name=$1
+
+ rbd group create $group_name
+}
+
+list_groups()
+{
+ rbd group list
+}
+
+check_group_exists()
+{
+ local group_name=$1
+ list_groups | grep $group_name
+}
+
+remove_group()
+{
+ local group_name=$1
+
+ rbd group remove $group_name
+}
+
+rename_group()
+{
+ local src_name=$1
+ local dest_name=$2
+
+ rbd group rename $src_name $dest_name
+}
+
+check_group_does_not_exist()
+{
+ local group_name=$1
+ for v in $(list_groups); do
+ if [ "$v" == "$group_name" ]; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+create_image()
+{
+ local image_name=$1
+ rbd create --size 10M $image_name
+}
+
+remove_image()
+{
+ local image_name=$1
+ rbd remove $image_name
+}
+
+add_image_to_group()
+{
+ local image_name=$1
+ local group_name=$2
+ rbd group image add $group_name $image_name
+}
+
+remove_image_from_group()
+{
+ local image_name=$1
+ local group_name=$2
+ rbd group image remove $group_name $image_name
+}
+
+check_image_in_group()
+{
+ local image_name=$1
+ local group_name=$2
+ for v in $(rbd group image list $group_name); do
+ local vtrimmed=${v#*/}
+ if [ "$vtrimmed" = "$image_name" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+check_image_not_in_group()
+{
+ local image_name=$1
+ local group_name=$2
+ for v in $(rbd group image list $group_name); do
+ local vtrimmed=${v#*/}
+ if [ "$vtrimmed" = "$image_name" ]; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+create_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ rbd group snap create $group_name@$snap_name
+}
+
+create_snapshots()
+{
+ local group_name=$1
+ local snap_name=$2
+ local snap_count=$3
+ for i in `seq 1 $snap_count`; do
+ rbd group snap create $group_name@$snap_name$i
+ done
+}
+
+remove_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ rbd group snap remove $group_name@$snap_name
+}
+
+remove_snapshots()
+{
+ local group_name=$1
+ local snap_name=$2
+ local snap_count=$3
+ for i in `seq 1 $snap_count`; do
+ rbd group snap remove $group_name@$snap_name$i
+ done
+}
+
+rename_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ local new_snap_name=$3
+ rbd group snap rename $group_name@$snap_name $new_snap_name
+}
+
+list_snapshots()
+{
+ local group_name=$1
+ rbd group snap list $group_name
+}
+
+rollback_snapshot()
+{
+ local group_name=$1
+ local snap_name=$2
+ rbd group snap rollback $group_name@$snap_name
+}
+
+check_snapshot_in_group()
+{
+ local group_name=$1
+ local snap_name=$2
+ list_snapshots $group_name | grep $snap_name
+}
+
+check_snapshots_count_in_group()
+{
+ local group_name=$1
+ local snap_name=$2
+ local expected_count=$3
+ local actual_count
+ actual_count=$(list_snapshots $group_name | grep -c $snap_name)
+ (( actual_count == expected_count ))
+}
+
+check_snapshot_not_in_group()
+{
+ local group_name=$1
+ local snap_name=$2
+ for v in $(list_snapshots $group_name | awk '{print $1}'); do
+ if [ "$v" = "$snap_name" ]; then
+ return 1
+ fi
+ done
+ return 0
+}
+
+echo "TEST: create remove consistency group"
+group="test_consistency_group"
+new_group="test_new_consistency_group"
+create_group $group
+check_group_exists $group
+rename_group $group $new_group
+check_group_exists $new_group
+remove_group $new_group
+check_group_does_not_exist $new_group
+echo "PASSED"
+
+echo "TEST: add remove images to consistency group"
+image="test_image"
+group="test_consistency_group"
+create_image $image
+create_group $group
+add_image_to_group $image $group
+check_image_in_group $image $group
+remove_image_from_group $image $group
+check_image_not_in_group $image $group
+remove_group $group
+remove_image $image
+echo "PASSED"
+
+echo "TEST: create remove snapshots of consistency group"
+image="test_image"
+group="test_consistency_group"
+snap="group_snap"
+new_snap="new_group_snap"
+sec_snap="group_snap2"
+create_image $image
+create_group $group
+add_image_to_group $image $group
+create_snapshot $group $snap
+check_snapshot_in_group $group $snap
+rename_snapshot $group $snap $new_snap
+check_snapshot_not_in_group $group $snap
+create_snapshot $group $sec_snap
+check_snapshot_in_group $group $sec_snap
+rollback_snapshot $group $new_snap
+remove_snapshot $group $new_snap
+check_snapshot_not_in_group $group $new_snap
+remove_snapshot $group $sec_snap
+check_snapshot_not_in_group $group $sec_snap
+remove_group $group
+remove_image $image
+echo "PASSED"
+
+echo "TEST: list snapshots of consistency group"
+image="test_image"
+group="test_consistency_group"
+snap="group_snap"
+create_image $image
+create_group $group
+add_image_to_group $image $group
+create_snapshots $group $snap 10
+check_snapshots_count_in_group $group $snap 10
+remove_snapshots $group $snap 10
+create_snapshots $group $snap 100
+check_snapshots_count_in_group $group $snap 100
+remove_snapshots $group $snap 100
+remove_group $group
+remove_image $image
+echo "PASSED"
+
+echo "OK"
diff --git a/qa/workunits/rbd/rbd_mirror_bootstrap.sh b/qa/workunits/rbd/rbd_mirror_bootstrap.sh
new file mode 100755
index 000000000..6ef06f2b8
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_bootstrap.sh
@@ -0,0 +1,58 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_bootstrap.sh - test peer bootstrap create/import
+#
+
+RBD_MIRROR_MANUAL_PEERS=1
+RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-1}
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+testlog "TEST: bootstrap cluster2 from cluster1"
+# create token on cluster1 and import to cluster2
+TOKEN=${TEMPDIR}/peer-token
+TOKEN_2=${TEMPDIR}/peer-token-2
+CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${POOL} > ${TOKEN}
+CEPH_ARGS='' rbd --cluster ${CLUSTER1} mirror pool peer bootstrap create ${PARENT_POOL} > ${TOKEN_2}
+cmp ${TOKEN} ${TOKEN_2}
+
+CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-only
+CEPH_ARGS='' rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool peer bootstrap import ${TOKEN} --direction rx-tx
+
+start_mirrors ${CLUSTER1}
+start_mirrors ${CLUSTER2}
+
+testlog "TEST: verify rx-only direction"
+# rx-only peer is added immediately by "rbd mirror pool peer bootstrap import"
+rbd --cluster ${CLUSTER2} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-only"'
+# tx-only peer is added asynchronously by mirror_peer_ping class method
+while ! rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers | length > 0'; do
+ sleep 1
+done
+rbd --cluster ${CLUSTER1} --pool ${POOL} mirror pool info --format json | jq -e '.peers[0].direction == "tx-only"'
+
+create_image_and_enable_mirror ${CLUSTER1} ${POOL} image1
+
+wait_for_image_replay_started ${CLUSTER2} ${POOL} image1
+write_image ${CLUSTER1} ${POOL} image1 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} image1
+
+testlog "TEST: verify rx-tx direction"
+# both rx-tx peers are added immediately by "rbd mirror pool peer bootstrap import"
+rbd --cluster ${CLUSTER1} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
+rbd --cluster ${CLUSTER2} --pool ${PARENT_POOL} mirror pool info --format json | jq -e '.peers[0].direction == "rx-tx"'
+
+create_image ${CLUSTER1} ${PARENT_POOL} image1
+create_image ${CLUSTER2} ${PARENT_POOL} image2
+
+enable_mirror ${CLUSTER1} ${PARENT_POOL} image1
+enable_mirror ${CLUSTER2} ${PARENT_POOL} image2
+
+wait_for_image_replay_started ${CLUSTER2} ${PARENT_POOL} image1
+write_image ${CLUSTER1} ${PARENT_POOL} image1 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${PARENT_POOL} image1
+
+wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} image2
+write_image ${CLUSTER2} ${PARENT_POOL} image2 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} image2
diff --git a/qa/workunits/rbd/rbd_mirror_fsx_compare.sh b/qa/workunits/rbd/rbd_mirror_fsx_compare.sh
new file mode 100755
index 000000000..0ba3c97d7
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_fsx_compare.sh
@@ -0,0 +1,38 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_fsx_compare.sh - test rbd-mirror daemon under FSX workload
+#
+# The script is used to compare FSX-generated images between two clusters.
+#
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+trap 'cleanup $?' INT TERM EXIT
+
+setup_tempdir
+
+testlog "TEST: wait for all images"
+image_count=$(rbd --cluster ${CLUSTER1} --pool ${POOL} ls | wc -l)
+retrying_seconds=0
+sleep_seconds=10
+while [ ${retrying_seconds} -le 7200 ]; do
+ [ $(rbd --cluster ${CLUSTER2} --pool ${POOL} ls | wc -l) -ge ${image_count} ] && break
+ sleep ${sleep_seconds}
+ retrying_seconds=$(($retrying_seconds+${sleep_seconds}))
+done
+
+testlog "TEST: snapshot all pool images"
+snap_id=`uuidgen`
+for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
+ create_snapshot ${CLUSTER1} ${POOL} ${image} ${snap_id}
+done
+
+testlog "TEST: wait for snapshots"
+for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
+ wait_for_snap_present ${CLUSTER2} ${POOL} ${image} ${snap_id}
+done
+
+testlog "TEST: compare image snapshots"
+for image in $(rbd --cluster ${CLUSTER1} --pool ${POOL} ls); do
+ compare_image_snapshots ${POOL} ${image}
+done
diff --git a/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh b/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh
new file mode 100755
index 000000000..d988987ba
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_fsx_prepare.sh
@@ -0,0 +1,10 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_fsx_prepare.sh - test rbd-mirror daemon under FSX workload
+#
+# The script sets up the clusters and pools; rbd_mirror_fsx_compare.sh is
+# used afterwards to compare the FSX-generated images between the clusters.
+#
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
diff --git a/qa/workunits/rbd/rbd_mirror_ha.sh b/qa/workunits/rbd/rbd_mirror_ha.sh
new file mode 100755
index 000000000..37739a83d
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_ha.sh
@@ -0,0 +1,210 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_ha.sh - test rbd-mirror daemons in HA mode
+#
+
+RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-7}
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+is_leader()
+{
+ local instance=$1
+ local pool=$2
+
+ test -n "${pool}" || pool=${POOL}
+
+ admin_daemon "${CLUSTER1}:${instance}" \
+ rbd mirror status ${pool} ${CLUSTER2}${PEER_CLUSTER_SUFFIX} |
+ grep '"leader": true'
+}
+
+wait_for_leader()
+{
+ local s instance
+
+ for s in 1 1 2 4 4 4 4 4 8 8 8 8 16 16 32 64; do
+ sleep $s
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ is_leader ${instance} || continue
+ LEADER=${instance}
+ return 0
+ done
+ done
+
+ LEADER=
+ return 1
+}
+
+release_leader()
+{
+ local pool=$1
+ local cmd="rbd mirror leader release"
+
+ test -n "${pool}" && cmd="${cmd} ${pool} ${CLUSTER2}"
+
+ admin_daemon "${CLUSTER1}:${LEADER}" ${cmd}
+}
+
+wait_for_leader_released()
+{
+ local i
+
+ test -n "${LEADER}"
+ for i in `seq 10`; do
+ is_leader ${LEADER} || return 0
+ sleep 1
+ done
+
+ return 1
+}
+
+test_replay()
+{
+ local image
+
+ for image; do
+ wait_for_image_replay_started ${CLUSTER1}:${LEADER} ${POOL} ${image}
+ write_image ${CLUSTER2} ${POOL} ${image} 100
+ wait_for_replay_complete ${CLUSTER1}:${LEADER} ${CLUSTER2} ${POOL} \
+ ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' \
+ 'primary_position' \
+ "${MIRROR_USER_ID_PREFIX}${LEADER} on $(hostname -s)"
+ if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} \
+ 'down+unknown'
+ fi
+ compare_images ${POOL} ${image}
+ done
+}
+
+testlog "TEST: start first daemon instance and test replay"
+start_mirror ${CLUSTER1}:0
+image1=test1
+create_image ${CLUSTER2} ${POOL} ${image1}
+LEADER=0
+test_replay ${image1}
+
+testlog "TEST: release leader and wait it is reacquired"
+is_leader 0 ${POOL}
+is_leader 0 ${PARENT_POOL}
+release_leader ${POOL}
+wait_for_leader_released
+is_leader 0 ${PARENT_POOL}
+wait_for_leader
+release_leader
+wait_for_leader_released
+expect_failure "" is_leader 0 ${PARENT_POOL}
+wait_for_leader
+
+testlog "TEST: start second daemon instance and test replay"
+start_mirror ${CLUSTER1}:1
+image2=test2
+create_image ${CLUSTER2} ${POOL} ${image2}
+test_replay ${image1} ${image2}
+
+testlog "TEST: release leader and test it is acquired by secondary"
+is_leader 0 ${POOL}
+is_leader 0 ${PARENT_POOL}
+release_leader ${POOL}
+wait_for_leader_released
+wait_for_leader
+test_replay ${image1} ${image2}
+release_leader
+wait_for_leader_released
+wait_for_leader
+test "${LEADER}" = 0
+
+testlog "TEST: stop first daemon instance and test replay"
+stop_mirror ${CLUSTER1}:0
+image3=test3
+create_image ${CLUSTER2} ${POOL} ${image3}
+LEADER=1
+test_replay ${image1} ${image2} ${image3}
+
+testlog "TEST: start first daemon instance and test replay"
+start_mirror ${CLUSTER1}:0
+image4=test4
+create_image ${CLUSTER2} ${POOL} ${image4}
+test_replay ${image3} ${image4}
+
+testlog "TEST: crash leader and test replay"
+stop_mirror ${CLUSTER1}:1 -KILL
+image5=test5
+create_image ${CLUSTER2} ${POOL} ${image5}
+LEADER=0
+test_replay ${image1} ${image4} ${image5}
+
+testlog "TEST: start crashed leader and test replay"
+start_mirror ${CLUSTER1}:1
+image6=test6
+create_image ${CLUSTER2} ${POOL} ${image6}
+test_replay ${image1} ${image6}
+
+testlog "TEST: start yet another daemon instance and test replay"
+start_mirror ${CLUSTER1}:2
+image7=test7
+create_image ${CLUSTER2} ${POOL} ${image7}
+test_replay ${image1} ${image7}
+
+testlog "TEST: release leader and test it is acquired by secondary"
+is_leader 0
+release_leader
+wait_for_leader_released
+wait_for_leader
+test_replay ${image1} ${image2}
+
+testlog "TEST: stop leader and test replay"
+stop_mirror ${CLUSTER1}:${LEADER}
+image8=test8
+create_image ${CLUSTER2} ${POOL} ${image8}
+prev_leader=${LEADER}
+wait_for_leader
+test_replay ${image1} ${image8}
+
+testlog "TEST: start previous leader and test replay"
+start_mirror ${CLUSTER1}:${prev_leader}
+image9=test9
+create_image ${CLUSTER2} ${POOL} ${image9}
+test_replay ${image1} ${image9}
+
+testlog "TEST: crash leader and test replay"
+stop_mirror ${CLUSTER1}:${LEADER} -KILL
+image10=test10
+create_image ${CLUSTER2} ${POOL} ${image10}
+prev_leader=${LEADER}
+wait_for_leader
+test_replay ${image1} ${image10}
+
+testlog "TEST: start previous leader and test replay"
+start_mirror ${CLUSTER1}:${prev_leader}
+image11=test11
+create_image ${CLUSTER2} ${POOL} ${image11}
+test_replay ${image1} ${image11}
+
+testlog "TEST: start some more daemon instances and test replay"
+start_mirror ${CLUSTER1}:3
+start_mirror ${CLUSTER1}:4
+start_mirror ${CLUSTER1}:5
+start_mirror ${CLUSTER1}:6
+image13=test13
+create_image ${CLUSTER2} ${POOL} ${image13}
+test_replay ${image1} ${image13}
+
+testlog "TEST: release leader and test it is acquired by secondary"
+release_leader
+wait_for_leader_released
+wait_for_leader
+test_replay ${image1} ${image2}
+
+testlog "TEST: in loop: stop leader and test replay"
+for i in 0 1 2 3 4 5; do
+ stop_mirror ${CLUSTER1}:${LEADER}
+ wait_for_leader
+ test_replay ${image1}
+done
+
+stop_mirror ${CLUSTER1}:${LEADER}
diff --git a/qa/workunits/rbd/rbd_mirror_helpers.sh b/qa/workunits/rbd/rbd_mirror_helpers.sh
new file mode 100755
index 000000000..f4961b925
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_helpers.sh
@@ -0,0 +1,1488 @@
+#!/bin/sh
+#
+# rbd_mirror_helpers.sh - shared rbd-mirror daemon helper functions
+#
+# The script starts two ("local" and "remote") clusters using the mstart.sh
+# script, creates a temporary directory used for cluster configs, daemon logs,
+# admin sockets and temporary files, and launches the rbd-mirror daemons.
+#
+# There are several env variables useful when troubleshooting a test failure:
+#
+# RBD_MIRROR_NOCLEANUP - if not empty, don't run the cleanup (stop processes,
+# destroy the clusters and remove the temp directory)
+# on exit, so it is possible to check the test state
+# after failure.
+# RBD_MIRROR_TEMDIR - use this path when creating the temporary directory
+# (should not exist) instead of running mktemp(1).
+# RBD_MIRROR_ARGS - use this to pass additional arguments to started
+# rbd-mirror daemons.
+# RBD_MIRROR_VARGS - use this to pass additional arguments to vstart.sh
+# when starting clusters.
+# RBD_MIRROR_INSTANCES - number of daemons to start per cluster
+# RBD_MIRROR_CONFIG_KEY - if not empty, use config-key for remote cluster
+# secrets
+# The cleanup can be done as a separate step, by running the script with the
+# `cleanup' argument and RBD_MIRROR_TEMDIR pointing at the temp directory.
+#
+# Note: like other workunit tests, rbd_mirror_journal.sh expects to find the
+# ceph binaries in PATH.
+#
+# Thus a typical troubleshooting session:
+#
+# From Ceph src dir (CEPH_SRC_PATH), start the test in NOCLEANUP mode and with
+# TEMPDIR pointing to a known location:
+#
+# cd $CEPH_SRC_PATH
+# PATH=$CEPH_SRC_PATH:$PATH
+# RBD_MIRROR_NOCLEANUP=1 RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
+# ../qa/workunits/rbd/rbd_mirror_journal.sh
+#
+# After the test failure cd to TEMPDIR and check the current state:
+#
+# cd /tmp/tmp.rbd_mirror
+# ls
+# less rbd-mirror.cluster1_daemon.$pid.log
+# ceph --cluster cluster1 -s
+# ceph --cluster cluster2 -s
+# rbd --cluster cluster2 -p mirror ls
+# rbd --cluster cluster2 -p mirror journal status --image test
+# ceph --admin-daemon rbd-mirror.cluster1_daemon.cluster1.$pid.asok help
+# ...
+#
+# Also you can execute commands (functions) from the script:
+#
+# cd $CEPH_SRC_PATH
+# export RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror
+# ../qa/workunits/rbd/rbd_mirror_journal.sh status
+# ../qa/workunits/rbd/rbd_mirror_journal.sh stop_mirror cluster1
+# ../qa/workunits/rbd/rbd_mirror_journal.sh start_mirror cluster2
+# ../qa/workunits/rbd/rbd_mirror_journal.sh flush cluster2
+# ...
+#
+# Eventually, run the cleanup:
+#
+# cd $CEPH_SRC_PATH
+# RBD_MIRROR_TEMDIR=/tmp/tmp.rbd_mirror \
+# ../qa/workunits/rbd/rbd_mirror_journal.sh cleanup
+#
+
+if type xmlstarlet > /dev/null 2>&1; then
+ XMLSTARLET=xmlstarlet
+elif type xml > /dev/null 2>&1; then
+ XMLSTARLET=xml
+else
+ echo "Missing xmlstarlet binary!"
+ exit 1
+fi
+
+RBD_MIRROR_INSTANCES=${RBD_MIRROR_INSTANCES:-2}
+
+CLUSTER1=cluster1
+CLUSTER2=cluster2
+PEER_CLUSTER_SUFFIX=
+POOL=mirror
+PARENT_POOL=mirror_parent
+NS1=ns1
+NS2=ns2
+TEMPDIR=
+CEPH_ID=${CEPH_ID:-mirror}
+RBD_IMAGE_FEATURES=${RBD_IMAGE_FEATURES:-layering,exclusive-lock,journaling}
+MIRROR_USER_ID_PREFIX=${MIRROR_USER_ID_PREFIX:-${CEPH_ID}.}
+MIRROR_POOL_MODE=${MIRROR_POOL_MODE:-pool}
+MIRROR_IMAGE_MODE=${MIRROR_IMAGE_MODE:-journal}
+
+export CEPH_ARGS="--id ${CEPH_ID}"
+
+LAST_MIRROR_INSTANCE=$((${RBD_MIRROR_INSTANCES} - 1))
+
+CEPH_ROOT=$(readlink -f $(dirname $0)/../../../src)
+CEPH_BIN=.
+CEPH_SRC=.
+if [ -e CMakeCache.txt ]; then
+ CEPH_SRC=${CEPH_ROOT}
+ CEPH_ROOT=${PWD}
+ CEPH_BIN=./bin
+
+ # needed for ceph CLI under cmake
+ export LD_LIBRARY_PATH=${CEPH_ROOT}/lib:${LD_LIBRARY_PATH}
+ export PYTHONPATH=${PYTHONPATH}:${CEPH_SRC}/pybind:${CEPH_ROOT}/lib/cython_modules/lib.3
+fi
+
+# These vars facilitate running this script in an environment with
+# ceph installed from packages, like teuthology. These are not defined
+# by default.
+#
+# RBD_MIRROR_USE_EXISTING_CLUSTER - if set, do not start and stop ceph clusters
+# RBD_MIRROR_USE_RBD_MIRROR - if set, use an existing instance of rbd-mirror
+# running as ceph client $CEPH_ID. If empty,
+# this script will start and stop rbd-mirror
+
+#
+# Functions
+#
+
+# Parse a value in format cluster[:instance] and set cluster and instance vars.
+set_cluster_instance()
+{
+ local val=$1
+ local cluster_var_name=$2
+ local instance_var_name=$3
+
+ cluster=${val%:*}
+ instance=${val##*:}
+
+ if [ "${instance}" = "${val}" ]; then
+ # instance was not specified, use default
+ instance=0
+ fi
+
+ eval ${cluster_var_name}=${cluster}
+ eval ${instance_var_name}=${instance}
+}
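+
+# Illustrative: set_cluster_instance "cluster1:2" cluster instance leaves
+# cluster=cluster1 and instance=2; without a ":instance" suffix the instance
+# defaults to 0.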
+
+daemon_asok_file()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local instance
+
+ set_cluster_instance "${local_cluster}" local_cluster instance
+
+ echo $(ceph-conf --cluster $local_cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'admin socket')
+}
+
+daemon_pid_file()
+{
+ local cluster=$1
+ local instance
+
+ set_cluster_instance "${cluster}" cluster instance
+
+ echo $(ceph-conf --cluster $cluster --name "client.${MIRROR_USER_ID_PREFIX}${instance}" 'pid file')
+}
+
+testlog()
+{
+ echo $(date '+%F %T') $@ | tee -a "${TEMPDIR}/rbd-mirror.test.log" >&2
+}
+
+expect_failure()
+{
+ local expected="$1" ; shift
+ local out=${TEMPDIR}/expect_failure.out
+
+ if "$@" > ${out} 2>&1 ; then
+ cat ${out} >&2
+ return 1
+ fi
+
+ if [ -z "${expected}" ]; then
+ return 0
+ fi
+
+ if ! grep -q "${expected}" ${out} ; then
+ cat ${out} >&2
+ return 1
+ fi
+
+ return 0
+}
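+
+# Illustrative usage (hypothetical image name): assert that a command fails
+# and that its output matches an expected pattern, e.g.
+#   expect_failure "No such file or directory" rbd info ${POOL}/no_such_image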
+
+mkfname()
+{
+ echo "$@" | sed -e 's|[/ ]|_|g'
+}
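+
+# e.g. mkfname "mirror/ns1 test" -> "mirror_ns1_test"; slashes and spaces
+# are replaced so the result is safe to use as a file name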
+
+create_users()
+{
+ local cluster=$1
+
+ CEPH_ARGS='' ceph --cluster "${cluster}" \
+ auth get-or-create client.${CEPH_ID} \
+ mon 'profile rbd' osd 'profile rbd' mgr 'profile rbd' >> \
+ ${CEPH_ROOT}/run/${cluster}/keyring
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ CEPH_ARGS='' ceph --cluster "${cluster}" \
+ auth get-or-create client.${MIRROR_USER_ID_PREFIX}${instance} \
+ mon 'profile rbd-mirror' osd 'profile rbd' mgr 'profile rbd' >> \
+ ${CEPH_ROOT}/run/${cluster}/keyring
+ done
+}
+
+setup_cluster()
+{
+ local cluster=$1
+
+ CEPH_ARGS='' ${CEPH_SRC}/mstart.sh ${cluster} -n ${RBD_MIRROR_VARGS}
+
+ cd ${CEPH_ROOT}
+ rm -f ${TEMPDIR}/${cluster}.conf
+ ln -s $(readlink -f run/${cluster}/ceph.conf) \
+ ${TEMPDIR}/${cluster}.conf
+
+ cd ${TEMPDIR}
+ create_users "${cluster}"
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ cat<<EOF >> ${TEMPDIR}/${cluster}.conf
+[client.${MIRROR_USER_ID_PREFIX}${instance}]
+ admin socket = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.asok
+ pid file = ${TEMPDIR}/rbd-mirror.\$cluster-\$name.pid
+ log file = ${TEMPDIR}/rbd-mirror.${cluster}_daemon.${instance}.log
+EOF
+ done
+}
+
+peer_add()
+{
+ local cluster=$1 ; shift
+ local pool=$1 ; shift
+ local client_cluster=$1 ; shift
+ local remote_cluster="${client_cluster##*@}"
+
+ local uuid_var_name
+ if [ -n "$1" ]; then
+ uuid_var_name=$1 ; shift
+ fi
+
+ local error_code
+ local peer_uuid
+
+ for s in 1 2 4 8 16 32; do
+ set +e
+ peer_uuid=$(rbd --cluster ${cluster} mirror pool peer add \
+ ${pool} ${client_cluster} $@)
+ error_code=$?
+ set -e
+
+ if [ $error_code -eq 17 ]; then
+ # EEXIST -- raced with a remote heartbeat ping; remove and retry
+ sleep $s
+ peer_uuid=$(rbd mirror pool info --cluster ${cluster} --pool ${pool} --format xml | \
+ xmlstarlet sel -t -v "//peers/peer[site_name='${remote_cluster}']/uuid")
+
+ CEPH_ARGS='' rbd --cluster ${cluster} --pool ${pool} mirror pool peer remove ${peer_uuid}
+ else
+ test $error_code -eq 0
+ if [ -n "$uuid_var_name" ]; then
+ eval ${uuid_var_name}=${peer_uuid}
+ fi
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+setup_pools()
+{
+ local cluster=$1
+ local remote_cluster=$2
+ local mon_map_file
+ local mon_addr
+ local admin_key_file
+ local uuid
+
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${POOL} 64 64
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${PARENT_POOL} 64 64
+
+ CEPH_ARGS='' rbd --cluster ${cluster} pool init ${POOL}
+ CEPH_ARGS='' rbd --cluster ${cluster} pool init ${PARENT_POOL}
+
+ if [ -n "${RBD_MIRROR_CONFIG_KEY}" ]; then
+ PEER_CLUSTER_SUFFIX=-DNE
+ fi
+
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror pool enable \
+ --site-name ${cluster}${PEER_CLUSTER_SUFFIX} ${POOL} ${MIRROR_POOL_MODE}
+ rbd --cluster ${cluster} mirror pool enable ${PARENT_POOL} image
+
+ rbd --cluster ${cluster} namespace create ${POOL}/${NS1}
+ rbd --cluster ${cluster} namespace create ${POOL}/${NS2}
+
+ rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS1} ${MIRROR_POOL_MODE}
+ rbd --cluster ${cluster} mirror pool enable ${POOL}/${NS2} image
+
+ if [ -z ${RBD_MIRROR_MANUAL_PEERS} ]; then
+ if [ -z ${RBD_MIRROR_CONFIG_KEY} ]; then
+ peer_add ${cluster} ${POOL} ${remote_cluster}
+ peer_add ${cluster} ${PARENT_POOL} ${remote_cluster}
+ else
+ mon_map_file=${TEMPDIR}/${remote_cluster}.monmap
+ CEPH_ARGS='' ceph --cluster ${remote_cluster} mon getmap > ${mon_map_file}
+ mon_addr=$(monmaptool --print ${mon_map_file} | grep -E 'mon\.' |
+ head -n 1 | sed -E 's/^[0-9]+: ([^ ]+).+$/\1/' | sed -E 's/\/[0-9]+//g')
+
+ admin_key_file=${TEMPDIR}/${remote_cluster}.client.${CEPH_ID}.key
+ CEPH_ARGS='' ceph --cluster ${remote_cluster} auth get-key client.${CEPH_ID} > ${admin_key_file}
+
+ CEPH_ARGS='' peer_add ${cluster} ${POOL} \
+ client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} '' \
+ --remote-mon-host "${mon_addr}" --remote-key-file ${admin_key_file}
+
+ peer_add ${cluster} ${PARENT_POOL} client.${CEPH_ID}@${remote_cluster}${PEER_CLUSTER_SUFFIX} uuid
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} mon-host ${mon_addr}
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror pool peer set ${PARENT_POOL} ${uuid} key-file ${admin_key_file}
+ fi
+ fi
+}
+
+setup_tempdir()
+{
+ if [ -n "${RBD_MIRROR_TEMDIR}" ]; then
+ test -d "${RBD_MIRROR_TEMDIR}" ||
+ mkdir "${RBD_MIRROR_TEMDIR}"
+ TEMPDIR="${RBD_MIRROR_TEMDIR}"
+ cd ${TEMPDIR}
+ else
+ TEMPDIR=`mktemp -d`
+ fi
+}
+
+setup()
+{
+ local c
+ trap 'cleanup $?' INT TERM EXIT
+
+ setup_tempdir
+ if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
+ setup_cluster "${CLUSTER1}"
+ setup_cluster "${CLUSTER2}"
+ fi
+
+ setup_pools "${CLUSTER1}" "${CLUSTER2}"
+ setup_pools "${CLUSTER2}" "${CLUSTER1}"
+
+ if [ -n "${RBD_MIRROR_MIN_COMPAT_CLIENT}" ]; then
+ CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd \
+ set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
+ CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd \
+ set-require-min-compat-client ${RBD_MIRROR_MIN_COMPAT_CLIENT}
+ fi
+}
+
+cleanup()
+{
+ local error_code=$1
+
+ set +e
+
+ if [ "${error_code}" -ne 0 ]; then
+ status
+ fi
+
+ if [ -z "${RBD_MIRROR_NOCLEANUP}" ]; then
+ local cluster instance
+
+ CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
+ CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
+ CEPH_ARGS='' ceph --cluster ${CLUSTER1} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
+ CEPH_ARGS='' ceph --cluster ${CLUSTER2} osd pool rm ${PARENT_POOL} ${PARENT_POOL} --yes-i-really-really-mean-it
+
+ for cluster in "${CLUSTER1}" "${CLUSTER2}"; do
+ stop_mirrors "${cluster}"
+ done
+
+ if [ -z "${RBD_MIRROR_USE_EXISTING_CLUSTER}" ]; then
+ cd ${CEPH_ROOT}
+ CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER1}
+ CEPH_ARGS='' ${CEPH_SRC}/mstop.sh ${CLUSTER2}
+ fi
+ test "${RBD_MIRROR_TEMDIR}" = "${TEMPDIR}" || rm -Rf ${TEMPDIR}
+ fi
+
+ if [ "${error_code}" -eq 0 ]; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+
+ exit ${error_code}
+}
+
+start_mirror()
+{
+ local cluster=$1
+ local instance
+
+ set_cluster_instance "${cluster}" cluster instance
+
+ test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
+
+ rbd-mirror \
+ --cluster ${cluster} \
+ --id ${MIRROR_USER_ID_PREFIX}${instance} \
+ --rbd-mirror-delete-retry-interval=5 \
+ --rbd-mirror-image-state-check-interval=5 \
+ --rbd-mirror-journal-poll-age=1 \
+ --rbd-mirror-pool-replayers-refresh-interval=5 \
+ --debug-rbd=30 --debug-journaler=30 \
+ --debug-rbd_mirror=30 \
+ --daemonize=true \
+ ${RBD_MIRROR_ARGS}
+}
+
+start_mirrors()
+{
+ local cluster=$1
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ start_mirror "${cluster}:${instance}"
+ done
+}
+
+stop_mirror()
+{
+ local cluster=$1
+ local sig=$2
+
+ test -n "${RBD_MIRROR_USE_RBD_MIRROR}" && return
+
+ local pid
+ pid=$(cat $(daemon_pid_file "${cluster}") 2>/dev/null) || :
+ if [ -n "${pid}" ]
+ then
+ kill ${sig} ${pid}
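+ # the awk command prints the daemon's ps line and exits non-zero while
+ # the process is still present, so the loop breaks once it is gone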
+ for s in 1 2 4 8 16 32; do
+ sleep $s
+ ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}' && break
+ done
+ ps auxww | awk -v pid=${pid} '$2 == pid {print; exit 1}'
+ fi
+ rm -f $(daemon_asok_file "${cluster}" "${CLUSTER1}")
+ rm -f $(daemon_asok_file "${cluster}" "${CLUSTER2}")
+ rm -f $(daemon_pid_file "${cluster}")
+}
+
+stop_mirrors()
+{
+ local cluster=$1
+ local sig=$2
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ stop_mirror "${cluster}:${instance}" "${sig}"
+ done
+}
+
+admin_daemon()
+{
+ local cluster=$1 ; shift
+ local instance
+
+ set_cluster_instance "${cluster}" cluster instance
+
+ local asok_file=$(daemon_asok_file "${cluster}:${instance}" "${cluster}")
+ test -S "${asok_file}"
+
+ ceph --admin-daemon ${asok_file} $@
+}
+
+admin_daemons()
+{
+ local cluster_instance=$1 ; shift
+ local cluster="${cluster_instance%:*}"
+ local instance="${cluster_instance##*:}"
+ local loop_instance
+
+ for s in 0 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+ if [ "${instance}" != "${cluster_instance}" ]; then
+ admin_daemon "${cluster}:${instance}" $@ && return 0
+ else
+ for loop_instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ admin_daemon "${cluster}:${loop_instance}" $@ && return 0
+ done
+ fi
+ done
+ return 1
+}
+
+all_admin_daemons()
+{
+ local cluster=$1 ; shift
+
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ admin_daemon "${cluster}:${instance}" $@
+ done
+}
+
+status()
+{
+ local cluster daemon image_pool image_ns image
+
+ for cluster in ${CLUSTER1} ${CLUSTER2}
+ do
+ echo "${cluster} status"
+ CEPH_ARGS='' ceph --cluster ${cluster} -s
+ CEPH_ARGS='' ceph --cluster ${cluster} service dump
+ CEPH_ARGS='' ceph --cluster ${cluster} service status
+ echo
+
+ for image_pool in ${POOL} ${PARENT_POOL}
+ do
+ for image_ns in "" "${NS1}" "${NS2}"
+ do
+ echo "${cluster} ${image_pool} ${image_ns} images"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls -l
+ echo
+
+ echo "${cluster} ${image_pool}${image_ns} mirror pool info"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool info
+ echo
+
+ echo "${cluster} ${image_pool}${image_ns} mirror pool status"
+ CEPH_ARGS='' rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" mirror pool status --verbose
+ echo
+
+ for image in `rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" ls 2>/dev/null`
+ do
+ echo "image ${image} info"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" info ${image}
+ echo
+ echo "image ${image} journal status"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" journal status --image ${image}
+ echo
+ echo "image ${image} snapshots"
+ rbd --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" snap ls --all ${image}
+ echo
+ done
+
+ echo "${cluster} ${image_pool} ${image_ns} rbd_mirroring omap vals"
+ rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirroring
+ echo "${cluster} ${image_pool} ${image_ns} rbd_mirror_leader omap vals"
+ rados --cluster ${cluster} -p ${image_pool} --namespace "${image_ns}" listomapvals rbd_mirror_leader
+ echo
+ done
+ done
+ done
+
+ local ret=0
+
+ for cluster in "${CLUSTER1}" "${CLUSTER2}"
+ do
+ for instance in `seq 0 ${LAST_MIRROR_INSTANCE}`; do
+ local pid_file=$(daemon_pid_file ${cluster}:${instance})
+ if [ ! -e ${pid_file} ]
+ then
+ echo "${cluster} rbd-mirror not running or unknown" \
+ "(${pid_file} not exist)"
+ continue
+ fi
+
+ local pid
+ pid=$(cat ${pid_file} 2>/dev/null) || :
+ if [ -z "${pid}" ]
+ then
+ echo "${cluster} rbd-mirror not running or unknown" \
+ "(can't find pid using ${pid_file})"
+ ret=1
+ continue
+ fi
+
+ echo "${daemon} rbd-mirror process in ps output:"
+ if ps auxww |
+ awk -v pid=${pid} 'NR == 1 {print} $2 == pid {print; exit 1}'
+ then
+ echo
+ echo "${cluster} rbd-mirror not running" \
+ "(can't find pid $pid in ps output)"
+ ret=1
+ continue
+ fi
+ echo
+
+ local asok_file=$(daemon_asok_file ${cluster}:${instance} ${cluster})
+ if [ ! -S "${asok_file}" ]
+ then
+ echo "${cluster} rbd-mirror asok is unknown (${asok_file} not exits)"
+ ret=1
+ continue
+ fi
+
+ echo "${cluster} rbd-mirror status"
+ ceph --admin-daemon ${asok_file} rbd mirror status
+ echo
+ done
+ done
+
+ return ${ret}
+}
+
+flush()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local cmd="rbd mirror flush"
+
+ if [ -n "${image}" ]
+ then
+ cmd="${cmd} ${pool}/${image}"
+ fi
+
+ admin_daemons "${cluster}" ${cmd}
+}
+
+test_image_replay_state()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local test_state=$4
+ local status_result
+ local current_state=stopped
+
+ status_result=$(admin_daemons "${cluster}" rbd mirror status ${pool}/${image} | grep -i 'state') || return 1
+ echo "${status_result}" | grep -i 'Replaying' && current_state=started
+ test "${test_state}" = "${current_state}"
+}
+
+wait_for_image_replay_state()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state=$4
+ local s
+
+ # TODO: add a way to force rbd-mirror to update replayers
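+ # capped exponential backoff: these sleeps add up to ~100 seconds total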
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+ test_image_replay_state "${cluster}" "${pool}" "${image}" "${state}" && return 0
+ done
+ return 1
+}
+
+wait_for_image_replay_started()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ wait_for_image_replay_state "${cluster}" "${pool}" "${image}" started
+}
+
+wait_for_image_replay_stopped()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ wait_for_image_replay_state "${cluster}" "${pool}" "${image}" stopped
+}
+
+get_journal_position()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local id_regexp=$4
+
+ # Parse a line like the one below, looking for the first position
+ # [id=, commit_position=[positions=[[object_number=1, tag_tid=3, entry_tid=9], [object_number=0, tag_tid=3, entry_tid=8], [object_number=3, tag_tid=3, entry_tid=7], [object_number=2, tag_tid=3, entry_tid=6]]]]
+
+ local status_log=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.status)
+ rbd --cluster ${cluster} journal status --image ${pool}/${image} |
+ tee ${status_log} >&2
+ sed -nEe 's/^.*\[id='"${id_regexp}"',.*positions=\[\[([^]]*)\],.*state=connected.*$/\1/p' \
+ ${status_log}
+}
+
+get_master_journal_position()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ get_journal_position "${cluster}" "${pool}" "${image}" ''
+}
+
+get_mirror_journal_position()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ get_journal_position "${cluster}" "${pool}" "${image}" '..*'
+}
+
+wait_for_journal_replay_complete()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local pool=$3
+ local image=$4
+ local s master_pos mirror_pos last_mirror_pos
+ local master_tag master_entry mirror_tag mirror_entry
+
+ while true; do
+ for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
+ sleep ${s}
+ flush "${local_cluster}" "${pool}" "${image}"
+ master_pos=$(get_master_journal_position "${cluster}" "${pool}" "${image}")
+ mirror_pos=$(get_mirror_journal_position "${cluster}" "${pool}" "${image}")
+ test -n "${master_pos}" -a "${master_pos}" = "${mirror_pos}" && return 0
+ test "${mirror_pos}" != "${last_mirror_pos}" && break
+ done
+
+ test "${mirror_pos}" = "${last_mirror_pos}" && return 1
+ last_mirror_pos="${mirror_pos}"
+
+ # handle the case where the mirror is ahead of the master
+ master_tag=$(echo "${master_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
+ mirror_tag=$(echo "${mirror_pos}" | grep -Eo "tag_tid=[0-9]*" | cut -d'=' -f 2)
+ master_entry=$(echo "${master_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
+ mirror_entry=$(echo "${mirror_pos}" | grep -Eo "entry_tid=[0-9]*" | cut -d'=' -f 2)
+ test "${master_tag}" = "${mirror_tag}" -a ${master_entry} -le ${mirror_entry} && return 0
+ done
+ return 1
+}
+
+mirror_image_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster "${cluster}" mirror image snapshot "${pool}/${image}"
+}
+
+get_newest_mirror_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local log=$4
+
+ rbd --cluster "${cluster}" snap list --all "${pool}/${image}" --format xml | \
+ xmlstarlet sel -t -c "//snapshots/snapshot[namespace/complete='true' and position()=last()]" > \
+ ${log} || true
+}
+
+wait_for_snapshot_sync_complete()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local pool=$3
+ local image=$4
+
+ local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.status)
+ local local_status_log=${TEMPDIR}/$(mkfname ${local_cluster}-${pool}-${image}.status)
+
+ mirror_image_snapshot "${cluster}" "${pool}" "${image}"
+ get_newest_mirror_snapshot "${cluster}" "${pool}" "${image}" "${status_log}"
+ local snapshot_id=$(xmlstarlet sel -t -v "//snapshot/id" < ${status_log})
+
+ while true; do
+ for s in 0.2 0.4 0.8 1.6 2 2 4 4 8 8 16 16 32 32; do
+ sleep ${s}
+
+ get_newest_mirror_snapshot "${local_cluster}" "${pool}" "${image}" "${local_status_log}"
+ local primary_snapshot_id=$(xmlstarlet sel -t -v "//snapshot/namespace/primary_snap_id" < ${local_status_log})
+
+ test "${snapshot_id}" = "${primary_snapshot_id}" && return 0
+ done
+
+ return 1
+ done
+ return 1
+}
+
+wait_for_replay_complete()
+{
+ local local_cluster=$1
+ local cluster=$2
+ local pool=$3
+ local image=$4
+
+ if [ "${MIRROR_IMAGE_MODE}" = "journal" ]; then
+ wait_for_journal_replay_complete ${local_cluster} ${cluster} ${pool} ${image}
+ elif [ "${MIRROR_IMAGE_MODE}" = "snapshot" ]; then
+ wait_for_snapshot_sync_complete ${local_cluster} ${cluster} ${pool} ${image}
+ else
+ return 1
+ fi
+}
+
+
+test_status_in_pool_dir()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state_pattern="$4"
+ local description_pattern="$5"
+ local service_pattern="$6"
+
+ local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}-${image}.mirror_status)
+ CEPH_ARGS='' rbd --cluster ${cluster} mirror image status ${pool}/${image} |
+ tee ${status_log} >&2
+ grep "^ state: .*${state_pattern}" ${status_log} || return 1
+ grep "^ description: .*${description_pattern}" ${status_log} || return 1
+
+ if [ -n "${service_pattern}" ]; then
+ grep "service: *${service_pattern}" ${status_log} || return 1
+ elif echo ${state_pattern} | grep '^up+'; then
+ grep "service: *${MIRROR_USER_ID_PREFIX}.* on " ${status_log} || return 1
+ else
+ grep "service: " ${status_log} && return 1
+ fi
+
+ # recheck using `mirror pool status` command to stress test it.
+
+ local last_update="$(sed -nEe 's/^ last_update: *(.*) *$/\1/p' ${status_log})"
+ test_mirror_pool_status_verbose \
+ ${cluster} ${pool} ${image} "${state_pattern}" "${last_update}" &&
+ return 0
+
+ echo "'mirror pool status' test failed" >&2
+ exit 1
+}
+
+test_mirror_pool_status_verbose()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state_pattern="$4"
+ local prev_last_update="$5"
+
+ local status_log=${TEMPDIR}/$(mkfname ${cluster}-${pool}.mirror_status)
+
+ rbd --cluster ${cluster} mirror pool status ${pool} --verbose --format xml \
+ > ${status_log}
+
+ local last_update state
+ last_update=$($XMLSTARLET sel -t -v \
+ "//images/image[name='${image}']/last_update" < ${status_log})
+ state=$($XMLSTARLET sel -t -v \
+ "//images/image[name='${image}']/state" < ${status_log})
+
+ echo "${state}" | grep "${state_pattern}" ||
+ test "${last_update}" '>' "${prev_last_update}"
+}
+
+wait_for_status_in_pool_dir()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state_pattern="$4"
+ local description_pattern="$5"
+ local service_pattern="$6"
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+ test_status_in_pool_dir ${cluster} ${pool} ${image} "${state_pattern}" \
+ "${description_pattern}" "${service_pattern}" &&
+ return 0
+ done
+ return 1
+}
+
+create_image()
+{
+ local cluster=$1 ; shift
+ local pool=$1 ; shift
+ local image=$1 ; shift
+ local size=128
+
+ if [ -n "$1" ]; then
+ size=$1
+ shift
+ fi
+
+ rbd --cluster ${cluster} create --size ${size} \
+ --image-feature "${RBD_IMAGE_FEATURES}" $@ ${pool}/${image}
+}
+
+create_image_and_enable_mirror()
+{
+ local cluster=$1 ; shift
+ local pool=$1 ; shift
+ local image=$1 ; shift
+ local mode=${1:-${MIRROR_IMAGE_MODE}}
+ if [ -n "$1" ]; then
+ shift
+ fi
+
+ create_image ${cluster} ${pool} ${image} $@
+ if [ "${MIRROR_POOL_MODE}" = "image" ] || [ "$pool" = "${PARENT_POOL}" ]; then
+ enable_mirror ${cluster} ${pool} ${image} ${mode}
+ fi
+}
+
+enable_journaling()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} feature enable ${pool}/${image} journaling
+}
+
+set_image_meta()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local key=$4
+ local val=$5
+
+ rbd --cluster ${cluster} image-meta set ${pool}/${image} $key $val
+}
+
+compare_image_meta()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local key=$4
+ local value=$5
+
+ test `rbd --cluster ${cluster} image-meta get ${pool}/${image} ${key}` = "${value}"
+}
+
+rename_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local new_name=$4
+
+ rbd --cluster=${cluster} rename ${pool}/${image} ${pool}/${new_name}
+}
+
+remove_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} snap purge ${pool}/${image}
+ rbd --cluster=${cluster} rm ${pool}/${image}
+}
+
+remove_image_retry()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ for s in 0 1 2 4 8 16 32; do
+ sleep ${s}
+ remove_image ${cluster} ${pool} ${image} && return 0
+ done
+ return 1
+}
+
+trash_move() {
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} trash move ${pool}/${image}
+}
+
+trash_restore() {
+ local cluster=$1
+ local pool=$2
+ local image_id=$3
+
+ rbd --cluster=${cluster} trash restore ${pool}/${image_id}
+}
+
+clone_image()
+{
+ local cluster=$1
+ local parent_pool=$2
+ local parent_image=$3
+ local parent_snap=$4
+ local clone_pool=$5
+ local clone_image=$6
+
+ shift 6
+
+ rbd --cluster ${cluster} clone \
+ ${parent_pool}/${parent_image}@${parent_snap} \
+ ${clone_pool}/${clone_image} --image-feature "${RBD_IMAGE_FEATURES}" $@
+}
+
+clone_image_and_enable_mirror()
+{
+ local cluster=$1
+ local parent_pool=$2
+ local parent_image=$3
+ local parent_snap=$4
+ local clone_pool=$5
+ local clone_image=$6
+ shift 6
+
+ local mode=${1:-${MIRROR_IMAGE_MODE}}
+ if [ -n "$1" ]; then
+ shift
+ fi
+
+ clone_image ${cluster} ${parent_pool} ${parent_image} ${parent_snap} ${clone_pool} ${clone_image} $@
+ enable_mirror ${cluster} ${clone_pool} ${clone_image} ${mode}
+}
+
+disconnect_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} journal client disconnect \
+ --image ${pool}/${image}
+}
+
+create_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap create ${pool}/${image}@${snap}
+}
+
+remove_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap rm ${pool}/${image}@${snap}
+}
+
+rename_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+ local new_snap=$5
+
+ rbd --cluster ${cluster} snap rename ${pool}/${image}@${snap} \
+ ${pool}/${image}@${new_snap}
+}
+
+purge_snapshots()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} snap purge ${pool}/${image}
+}
+
+protect_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap protect ${pool}/${image}@${snap}
+}
+
+unprotect_snapshot()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ rbd --cluster ${cluster} snap unprotect ${pool}/${image}@${snap}
+}
+
+unprotect_snapshot_retry()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap=$4
+
+ for s in 0 1 2 4 8 16 32; do
+ sleep ${s}
+ unprotect_snapshot ${cluster} ${pool} ${image} ${snap} && return 0
+ done
+ return 1
+}
+
+wait_for_snap_present()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap_name=$4
+ local s
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
+ sleep ${s}
+ rbd --cluster ${cluster} info ${pool}/${image}@${snap_name} || continue
+ return 0
+ done
+ return 1
+}
+
+test_snap_moved_to_trash()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap_name=$4
+
+ rbd --cluster ${cluster} snap ls ${pool}/${image} --all |
+ grep -F " trash (${snap_name})"
+}
+
+wait_for_snap_moved_to_trash()
+{
+ local s
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
+ sleep ${s}
+ test_snap_moved_to_trash $@ || continue
+ return 0
+ done
+ return 1
+}
+
+test_snap_removed_from_trash()
+{
+ test_snap_moved_to_trash $@ && return 1
+ return 0
+}
+
+wait_for_snap_removed_from_trash()
+{
+ local s
+
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16 16 16 32 32 32 32; do
+ sleep ${s}
+ test_snap_removed_from_trash $@ || continue
+ return 0
+ done
+ return 1
+}
+
+count_mirror_snaps()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} snap ls ${pool}/${image} --all |
+ grep -c -F " mirror ("
+}
+
+write_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local count=$4
+ local size=$5
+
+ test -n "${size}" || size=4096
+
+ rbd --cluster ${cluster} bench ${pool}/${image} --io-type write \
+ --io-size ${size} --io-threads 1 --io-total $((size * count)) \
+ --io-pattern rand
+}
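+
+# Illustrative: write_image ${CLUSTER2} ${POOL} test 100 issues 100 random
+# writes of the default 4096-byte size (io-total = size * count)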
+
+stress_write_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
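+ # pick a random run time in the range [5, 14] seconds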
+ local duration=$(awk 'BEGIN {srand(); print int(10 * rand()) + 5}')
+
+ set +e
+ timeout ${duration}s ceph_test_rbd_mirror_random_write \
+ --cluster ${cluster} ${pool} ${image} \
+ --debug-rbd=20 --debug-journaler=20 \
+ 2> ${TEMPDIR}/rbd-mirror-random-write.log
+ error_code=$?
+ set -e
+
+ if [ $error_code -eq 124 ]; then
+ return 0
+ fi
+ return 1
+}
+
+show_diff()
+{
+ local file1=$1
+ local file2=$2
+
+ xxd ${file1} > ${file1}.xxd
+ xxd ${file2} > ${file2}.xxd
+ sdiff -s ${file1}.xxd ${file2}.xxd | head -n 64
+ rm -f ${file1}.xxd ${file2}.xxd
+}
+
+compare_images()
+{
+ local pool=$1
+ local image=$2
+ local ret=0
+
+ local rmt_export=${TEMPDIR}/$(mkfname ${CLUSTER2}-${pool}-${image}.export)
+ local loc_export=${TEMPDIR}/$(mkfname ${CLUSTER1}-${pool}-${image}.export)
+
+ rm -f ${rmt_export} ${loc_export}
+ rbd --cluster ${CLUSTER2} export ${pool}/${image} ${rmt_export}
+ rbd --cluster ${CLUSTER1} export ${pool}/${image} ${loc_export}
+ if ! cmp ${rmt_export} ${loc_export}
+ then
+ show_diff ${rmt_export} ${loc_export}
+ ret=1
+ fi
+ rm -f ${rmt_export} ${loc_export}
+ return ${ret}
+}
+
+compare_image_snapshots()
+{
+ local pool=$1
+ local image=$2
+ local ret=0
+
+ local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
+ local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
+
+ for snap_name in $(rbd --cluster ${CLUSTER1} --format xml \
+ snap list ${pool}/${image} | \
+ $XMLSTARLET sel -t -v "//snapshot/name" | \
+ grep -E -v "^\.rbd-mirror\."); do
+ rm -f ${rmt_export} ${loc_export}
+ rbd --cluster ${CLUSTER2} export ${pool}/${image}@${snap_name} ${rmt_export}
+ rbd --cluster ${CLUSTER1} export ${pool}/${image}@${snap_name} ${loc_export}
+ if ! cmp ${rmt_export} ${loc_export}
+ then
+ show_diff ${rmt_export} ${loc_export}
+ ret=1
+ fi
+ done
+ rm -f ${rmt_export} ${loc_export}
+ return ${ret}
+}
+
+demote_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} mirror image demote ${pool}/${image}
+}
+
+promote_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local force=$4
+
+ rbd --cluster=${cluster} mirror image promote ${pool}/${image} ${force}
+}
+
+set_pool_mirror_mode()
+{
+ local cluster=$1
+ local pool=$2
+ local mode=${3:-${MIRROR_POOL_MODE}}
+
+ rbd --cluster=${cluster} mirror pool enable ${pool} ${mode}
+}
+
+disable_mirror()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} mirror image disable ${pool}/${image}
+}
+
+enable_mirror()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local mode=${4:-${MIRROR_IMAGE_MODE}}
+
+ rbd --cluster=${cluster} mirror image enable ${pool}/${image} ${mode}
+ # Display image info including the global image id for debugging purpose
+ rbd --cluster=${cluster} info ${pool}/${image}
+}
+
+test_image_present()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local test_state=$4
+ local image_id=$5
+ local current_state=deleted
+ local current_image_id
+
+ current_image_id=$(get_image_id ${cluster} ${pool} ${image})
+ test -n "${current_image_id}" &&
+ test -z "${image_id}" -o "${image_id}" = "${current_image_id}" &&
+ current_state=present
+
+ test "${test_state}" = "${current_state}"
+}
+
+wait_for_image_present()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local state=$4
+ local image_id=$5
+ local s
+
+ test -n "${image_id}" ||
+ image_id=$(get_image_id ${cluster} ${pool} ${image})
+
+ # TODO: add a way to force rbd-mirror to update replayers
+ for s in 0.1 1 2 4 8 8 8 8 8 8 8 8 16 16 32 32; do
+ sleep ${s}
+ test_image_present \
+ "${cluster}" "${pool}" "${image}" "${state}" "${image_id}" &&
+ return 0
+ done
+ return 1
+}
+
+get_image_id()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster=${cluster} info ${pool}/${image} |
+ sed -ne 's/^.*block_name_prefix: rbd_data\.//p'
+}
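+
+# e.g. an info line "block_name_prefix: rbd_data.10074b0dc51d" (id shown here
+# is hypothetical) yields the image id "10074b0dc51d"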
+
+request_resync_image()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local image_id_var_name=$4
+
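+ # stash the pre-resync image id in the caller-named variable so the caller
+ # can later verify that the image was recreated with a new id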
+ eval "${image_id_var_name}='$(get_image_id ${cluster} ${pool} ${image})'"
+ eval 'test -n "$'${image_id_var_name}'"'
+
+ rbd --cluster=${cluster} mirror image resync ${pool}/${image}
+}
+
+get_image_data_pool()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
+ rbd --cluster ${cluster} info ${pool}/${image} |
+ awk '$1 == "data_pool:" {print $2}'
+}
+
+get_clone_format()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+
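+ # clone v2 children advertise the "clone-child" op feature; an image that
+ # has a parent but lacks that feature is a v1 clone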
+ rbd --cluster ${cluster} info ${pool}/${image} |
+ awk 'BEGIN {
+ format = 1
+ }
+ $1 == "parent:" {
+ parent = $2
+ }
+ /op_features: .*clone-child/ {
+ format = 2
+ }
+ END {
+ if (!parent) exit 1
+ print format
+ }'
+}
+
+list_omap_keys()
+{
+ local cluster=$1
+ local pool=$2
+ local obj_name=$3
+
+ rados --cluster ${cluster} -p ${pool} listomapkeys ${obj_name}
+}
+
+count_omap_keys_with_filter()
+{
+ local cluster=$1
+ local pool=$2
+ local obj_name=$3
+ local filter=$4
+
+ list_omap_keys ${cluster} ${pool} ${obj_name} | grep -c ${filter}
+}
+
+wait_for_omap_keys()
+{
+ local cluster=$1
+ local pool=$2
+ local obj_name=$3
+ local filter=$4
+
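+ # despite the name, this waits (with capped backoff) until *no* omap keys
+ # matching the filter remain on the object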
+ for s in 0 1 2 2 4 4 8 8 8 16 16 32; do
+ sleep $s
+
+ set +e
+ test "$(count_omap_keys_with_filter ${cluster} ${pool} ${obj_name} ${filter})" = 0
+ error_code=$?
+ set -e
+
+ if [ $error_code -eq 0 ]; then
+ return 0
+ fi
+ done
+
+ return 1
+}
+
+wait_for_image_in_omap()
+{
+ local cluster=$1
+ local pool=$2
+
+ wait_for_omap_keys ${cluster} ${pool} rbd_mirroring status_global
+ wait_for_omap_keys ${cluster} ${pool} rbd_mirroring image_
+ wait_for_omap_keys ${cluster} ${pool} rbd_mirror_leader image_map
+}
+
+#
+# Main
+#
+
+if [ "$#" -gt 0 ]
+then
+ if [ -z "${RBD_MIRROR_TEMDIR}" ]
+ then
+ echo "RBD_MIRROR_TEMDIR is not set" >&2
+ exit 1
+ fi
+
+ TEMPDIR="${RBD_MIRROR_TEMDIR}"
+ cd ${TEMPDIR}
+ $@
+ exit $?
+fi
diff --git a/qa/workunits/rbd/rbd_mirror_journal.sh b/qa/workunits/rbd/rbd_mirror_journal.sh
new file mode 100755
index 000000000..54f6aeec8
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_journal.sh
@@ -0,0 +1,614 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_journal.sh - test rbd-mirror daemon in journal-based mirroring mode
+#
+# The script starts two ("local" and "remote") clusters using the mstart.sh
+# script, creates a temporary directory used for cluster configs, daemon logs,
+# admin sockets and temporary files, and launches the rbd-mirror daemons.
+#
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+testlog "TEST: add image and test replay"
+start_mirrors ${CLUSTER1}
+image=test
+create_image ${CLUSTER2} ${POOL} ${image}
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
+fi
+compare_images ${POOL} ${image}
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
+
+testlog "TEST: stop mirror, add image, start mirror and test replay"
+stop_mirrors ${CLUSTER1}
+image1=test1
+create_image ${CLUSTER2} ${POOL} ${image1}
+write_image ${CLUSTER2} ${POOL} ${image1} 100
+start_mirrors ${CLUSTER1}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying' 'primary_position'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
+fi
+compare_images ${POOL} ${image1}
+
+testlog "TEST: test the first image is replaying after restart"
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ testlog "TEST: stop/start/restart mirror via admin socket"
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ flush ${CLUSTER1}
+ all_admin_daemons ${CLUSTER1} rbd mirror status
+fi
+
+remove_image_retry ${CLUSTER2} ${POOL} ${image1}
+
+testlog "TEST: test image rename"
+new_name="${image}_RENAMED"
+rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
+admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: test trash move restore"
+image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
+trash_move ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+trash_restore ${CLUSTER2} ${POOL} ${image_id}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+testlog "TEST: failover and failback"
+start_mirrors ${CLUSTER2}
+
+# demote and promote same cluster
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+# failover (unmodified)
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+
+# failback (unmodified)
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+# failover
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+write_image ${CLUSTER1} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+# failback
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+testlog "TEST: failover / failback loop"
+for i in `seq 1 20`; do
+ demote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
+ demote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+done
+
+testlog "TEST: force promote"
+force_promote_image=test_force_promote
+create_image ${CLUSTER2} ${POOL} ${force_promote_image}
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
+
+testlog "TEST: cloned images"
+testlog " - default"
+parent_image=test_parent
+parent_snap=snap
+create_image ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+clone_image=test_clone
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
+write_image ${CLUSTER2} ${POOL} ${clone_image} 100
+
+enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} journal
+wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying' 'primary_position'
+compare_images ${PARENT_POOL} ${parent_image}
+
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${clone_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
+
+testlog " - clone v1"
+clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}1
+
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
+ ${clone_image}_v1 --rbd-default-clone-format 1
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
+unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2"
+parent_snap=snap_v2
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
+ ${clone_image}_v2 --rbd-default-clone-format 2
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
+
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
+wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2 non-primary"
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+clone_image ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} \
+ ${clone_image}_v2 --rbd-default-clone-format 2
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+
+testlog "TEST: data pool"
+dp_image=test_data_pool
+create_image ${CLUSTER2} ${POOL} ${dp_image} 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${dp_image}@snap1
+compare_images ${POOL} ${dp_image}@snap2
+compare_images ${POOL} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
+
+testlog "TEST: disable mirroring / delete non-primary image"
+image2=test2
+image3=test3
+image4=test4
+image5=test5
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ create_image ${CLUSTER2} ${POOL} ${i}
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ fi
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
+done
+
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+for i in ${image2} ${image4}; do
+ disable_mirror ${CLUSTER2} ${POOL} ${i}
+done
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
+for i in ${image3} ${image5}; do
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
+done
+
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
+for i in ${image2} ${image4}; do
+ enable_journaling ${CLUSTER2} ${POOL} ${i}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${i}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${i}
+ compare_images ${POOL} ${i}
+done
+
+testlog "TEST: remove mirroring pool"
+pool=pool_to_remove
+for cluster in ${CLUSTER1} ${CLUSTER2}; do
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool create ${pool} 16 16
+ CEPH_ARGS='' rbd --cluster ${cluster} pool init ${pool}
+ rbd --cluster ${cluster} mirror pool enable ${pool} pool
+done
+peer_add ${CLUSTER1} ${pool} ${CLUSTER2}
+peer_add ${CLUSTER2} ${pool} ${CLUSTER1}
+rdp_image=test_remove_data_pool
+create_image ${CLUSTER2} ${pool} ${image} 128
+create_image ${CLUSTER2} ${POOL} ${rdp_image} 128 --data-pool ${pool}
+write_image ${CLUSTER2} ${pool} ${image} 100
+write_image ${CLUSTER2} ${POOL} ${rdp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${pool} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${pool} ${image} 'up+replaying' 'primary_position'
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${rdp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${rdp_image} 'up+replaying' 'primary_position'
+for cluster in ${CLUSTER1} ${CLUSTER2}; do
+ CEPH_ARGS='' ceph --cluster ${cluster} osd pool rm ${pool} ${pool} --yes-i-really-really-mean-it
+done
+remove_image_retry ${CLUSTER2} ${POOL} ${rdp_image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${rdp_image} 'deleted'
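+# backoff loop: once the pool is gone the admin socket should stop reporting
+# status for the image; `|| break` exits on the first failure and the trailing
+# `&& false` asserts that the status call now fails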
+for i in 0 1 2 4 8 8 8 8 16 16; do
+ sleep $i
+ admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} || break
+done
+admin_daemons "${CLUSTER2}" rbd mirror status ${pool}/${image} && false
+
+testlog "TEST: snapshot rename"
+snap_name='snap_rename'
+create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
+for i in `seq 1 20`; do
+ rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
+done
+wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
+for i in ${image2} ${image4}; do
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+testlog "TEST: disable mirror while daemon is stopped"
+stop_mirrors ${CLUSTER1}
+stop_mirrors ${CLUSTER2}
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+disable_mirror ${CLUSTER2} ${POOL} ${image}
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+fi
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'pool'
+enable_journaling ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: non-default namespace image mirroring"
+testlog " - replay"
+create_image ${CLUSTER2} ${POOL}/${NS1} ${image}
+create_image ${CLUSTER2} ${POOL}/${NS2} ${image}
+enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image} journal
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
+write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
+write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying' 'primary_position'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL}/${NS1} ${image}
+compare_images ${POOL}/${NS2} ${image}
+
+testlog " - disable mirroring / delete image"
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
+disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
+remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
+
+testlog " - data pool"
+dp_image=test_data_pool
+create_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying' 'primary_position'
+compare_images ${POOL}/${NS1} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+
+testlog "TEST: simple image resync"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+
+testlog "TEST: image resync while replayer is stopped"
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+ compare_images ${POOL} ${image}
+fi
+
+testlog "TEST: request image resync while daemon is offline"
+stop_mirrors ${CLUSTER1}
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+compare_images ${POOL} ${image}
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: client disconnect"
+image=laggy
+create_image ${CLUSTER2} ${POOL} ${image} 128 --journal-object-size 64K
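+# a small journal object size makes journal object sets fill and roll over
+# quickly, so the "laggy" client can be disconnected with modest write traffic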
+write_image ${CLUSTER2} ${POOL} ${image} 10
+
+testlog " - replay stopped after disconnect"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+disconnect_image ${CLUSTER2} ${POOL} ${image}
+test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
+
+testlog " - replay started after resync requested"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+compare_images ${POOL} ${image}
+
+testlog " - disconnected after max_concurrent_object_sets reached"
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+ set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_journal_max_concurrent_object_sets 1
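+ # with only one concurrent object set allowed, these writes force the
+ # journal to trim past the stopped client's position, disconnecting it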
+ write_image ${CLUSTER2} ${POOL} ${image} 20 16384
+ write_image ${CLUSTER2} ${POOL} ${image} 20 16384
+ test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+ set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_journal_max_concurrent_object_sets 0
+
+ testlog " - replay is still stopped (disconnected) after restart"
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
+fi
+
+testlog " - replay started after resync requested"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+compare_images ${POOL} ${image}
+
+testlog " - rbd_mirroring_resync_after_disconnect config option"
+set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_mirroring_resync_after_disconnect true
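+# image-meta keys with the conf_ prefix override the corresponding librbd
+# config option for this image only; with resync_after_disconnect enabled,
+# rbd-mirror resyncs automatically instead of staying disconnected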
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+image_id=$(get_image_id ${CLUSTER1} ${POOL} ${image})
+disconnect_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+test -n "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+compare_images ${POOL} ${image}
+set_image_meta ${CLUSTER2} ${POOL} ${image} \
+ conf_rbd_mirroring_resync_after_disconnect false
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+disconnect_image ${CLUSTER2} ${POOL} ${image}
+test -z "$(get_mirror_journal_position ${CLUSTER2} ${POOL} ${image})"
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'disconnected'
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: split-brain"
+image=split-brain
+create_image ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
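+# force-promoting the non-primary while the original primary still exists
+# creates divergent histories; the later demote must surface 'split-brain',
+# which only an explicit resync can clear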
+promote_image ${CLUSTER1} ${POOL} ${image} --force
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${image} 10
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying' 'primary_position'
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed"
+start_mirrors ${CLUSTER2}
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ # teuthology will trash the daemon
+ testlog "TEST: no blocklists"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+fi
diff --git a/qa/workunits/rbd/rbd_mirror_snapshot.sh b/qa/workunits/rbd/rbd_mirror_snapshot.sh
new file mode 100755
index 000000000..c70d48b09
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_snapshot.sh
@@ -0,0 +1,517 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_snapshot.sh - test rbd-mirror daemon in snapshot-based mirroring mode
+#
+# The script starts two ("local" and "remote") clusters using the mstart.sh
+# script, creates a temporary directory used for cluster configs, daemon logs,
+# admin sockets, and temporary files, and launches the rbd-mirror daemon.
+#
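+# For reference, the helpers effectively drive the standard CLI workflow for
+# snapshot-based mirroring, roughly (illustrative pool/image names):
+#
+#   rbd mirror pool enable <pool> image
+#   rbd mirror image enable <pool>/<image> snapshot
+#   rbd mirror image snapshot <pool>/<image>
+#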
+
+MIRROR_POOL_MODE=image
+MIRROR_IMAGE_MODE=snapshot
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+testlog "TEST: add image and test replay"
+start_mirrors ${CLUSTER1}
+image=test
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key1" "value1"
+set_image_meta ${CLUSTER2} ${POOL} ${image} "key2" "value2"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'down+unknown'
+fi
+compare_images ${POOL} ${image}
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key1" "value1"
+compare_image_meta ${CLUSTER1} ${POOL} ${image} "key2" "value2"
+
+testlog "TEST: stop mirror, add image, start mirror and test replay"
+stop_mirrors ${CLUSTER1}
+image1=test1
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image1}
+write_image ${CLUSTER2} ${POOL} ${image1} 100
+start_mirrors ${CLUSTER1}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image1}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image1} 'down+unknown'
+fi
+compare_images ${POOL} ${image1}
+
+testlog "TEST: test the first image is replaying after restart"
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ testlog "TEST: stop/start/restart mirror via admin socket"
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror start ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+
+ all_admin_daemons ${CLUSTER1} rbd mirror stop ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+stopped'
+
+ all_admin_daemons ${CLUSTER1} rbd mirror restart ${POOL} ${CLUSTER2}${PEER_CLUSTER_SUFFIX}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image1}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image1} 'up+replaying'
+
+ flush ${CLUSTER1}
+ all_admin_daemons ${CLUSTER1} rbd mirror status
+fi
+
+remove_image_retry ${CLUSTER2} ${POOL} ${image1}
+
+testlog "TEST: test image rename"
+new_name="${image}_RENAMED"
+rename_image ${CLUSTER2} ${POOL} ${image} ${new_name}
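+# snapshot-based mirroring only propagates changes at mirror snapshot points,
+# hence the explicit mirror snapshot request after each rename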
+mirror_image_snapshot ${CLUSTER2} ${POOL} ${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+admin_daemons ${CLUSTER1} rbd mirror status ${POOL}/${new_name}
+admin_daemons ${CLUSTER1} rbd mirror restart ${POOL}/${new_name}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${new_name}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${new_name} 'up+replaying'
+rename_image ${CLUSTER2} ${POOL} ${new_name} ${image}
+mirror_image_snapshot ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: test trash move restore"
+image_id=$(get_image_id ${CLUSTER2} ${POOL} ${image})
+trash_move ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+trash_restore ${CLUSTER2} ${POOL} ${image_id}
+enable_mirror ${CLUSTER2} ${POOL} ${image} snapshot
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed (with rbd-mirror on one cluster)"
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+testlog "TEST: failover and failback"
+start_mirrors ${CLUSTER2}
+
+# demote and promote same cluster
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+# failover (unmodified)
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+
+# failback (unmodified)
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+# failover
+demote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+write_image ${CLUSTER1} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+# failback
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+promote_image ${CLUSTER2} ${POOL} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+write_image ${CLUSTER2} ${POOL} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+compare_images ${POOL} ${image}
+
+testlog "TEST: failover / failback loop"
+for i in `seq 1 20`; do
+ demote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER2} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER2} ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+replaying'
+ demote_image ${CLUSTER1} ${POOL} ${image}
+ wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+unknown'
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+unknown'
+ promote_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${image} 'up+stopped'
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+done
+# check that demote snapshots (and other mirror snapshots) don't pile up
+test "$(count_mirror_snaps ${CLUSTER1} ${POOL} ${image})" -le 3
+test "$(count_mirror_snaps ${CLUSTER2} ${POOL} ${image})" -le 3
+
+testlog "TEST: force promote"
+force_promote_image=test_force_promote
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${force_promote_image}
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+promote_image ${CLUSTER1} ${POOL} ${force_promote_image} '--force'
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${force_promote_image}
+wait_for_image_replay_stopped ${CLUSTER2} ${POOL} ${force_promote_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${force_promote_image} 'up+stopped'
+wait_for_status_in_pool_dir ${CLUSTER2} ${POOL} ${force_promote_image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${force_promote_image} 100
+write_image ${CLUSTER2} ${POOL} ${force_promote_image} 100
+remove_image_retry ${CLUSTER1} ${POOL} ${force_promote_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${force_promote_image}
+
+testlog "TEST: cloned images"
+testlog " - default"
+parent_image=test_parent
+parent_snap=snap
+create_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+write_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} 100
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+protect_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+clone_image=test_clone
+clone_image ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap} ${POOL} ${clone_image}
+write_image ${CLUSTER2} ${POOL} ${clone_image} 100
+enable_mirror ${CLUSTER2} ${POOL} ${clone_image} snapshot
+
+wait_for_image_replay_started ${CLUSTER1} ${PARENT_POOL} ${parent_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${PARENT_POOL} ${parent_image} 'up+replaying'
+compare_images ${PARENT_POOL} ${parent_image}
+
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${clone_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${clone_image} 'up+replaying'
+compare_images ${POOL} ${clone_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}
+
+testlog " - clone v1"
+clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}1
+
+clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}_v1 snapshot --rbd-default-clone-format 1
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v1) = 1
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v1
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v1) = 1
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v1
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}1
+unprotect_snapshot_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2"
+parent_snap=snap_v2
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+clone_image_and_enable_mirror ${CLUSTER2} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
+test $(get_clone_format ${CLUSTER2} ${POOL} ${clone_image}_v2) = 2
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${clone_image}_v2
+test $(get_clone_format ${CLUSTER1} ${POOL} ${clone_image}_v2) = 2
+
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+test_snap_moved_to_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${POOL} ${clone_image}_v2
+wait_for_image_present ${CLUSTER1} ${POOL} ${clone_image}_v2 'deleted'
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+
+testlog " - clone v2 non-primary"
+create_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_snap_present ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+clone_image_and_enable_mirror ${CLUSTER1} ${PARENT_POOL} ${parent_image} \
+ ${parent_snap} ${POOL} ${clone_image}_v2 snapshot --rbd-default-clone-format 2
+remove_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+test_snap_removed_from_trash ${CLUSTER2} ${PARENT_POOL} ${parent_image} ${parent_snap}
+mirror_image_snapshot ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+wait_for_snap_moved_to_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER1} ${POOL} ${clone_image}_v2
+wait_for_snap_removed_from_trash ${CLUSTER1} ${PARENT_POOL} ${parent_image} ${parent_snap}
+remove_image_retry ${CLUSTER2} ${PARENT_POOL} ${parent_image}
+
+testlog "TEST: data pool"
+dp_image=test_data_pool
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap1'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+create_snapshot ${CLUSTER2} ${POOL} ${dp_image} 'snap2'
+write_image ${CLUSTER2} ${POOL} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${dp_image} 'up+replaying'
+compare_images ${POOL} ${dp_image}@snap1
+compare_images ${POOL} ${dp_image}@snap2
+compare_images ${POOL} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL} ${dp_image}
+
+testlog "TEST: disable mirroring / delete non-primary image"
+image2=test2
+image3=test3
+image4=test4
+image5=test5
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${i}
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ create_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ if [ "${i}" = "${image4}" ] || [ "${i}" = "${image5}" ]; then
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ protect_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ fi
+ write_image ${CLUSTER2} ${POOL} ${i} 100
+ mirror_image_snapshot ${CLUSTER2} ${POOL} ${i}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'present'
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${i} 'snap2'
+done
+
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+for i in ${image2} ${image4}; do
+ disable_mirror ${CLUSTER2} ${POOL} ${i}
+done
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image5} 'snap2'
+for i in ${image3} ${image5}; do
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap1'
+ remove_snapshot ${CLUSTER2} ${POOL} ${i} 'snap2'
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+for i in ${image2} ${image3} ${image4} ${image5}; do
+ wait_for_image_present ${CLUSTER1} ${POOL} ${i} 'deleted'
+done
+
+testlog "TEST: snapshot rename"
+snap_name='snap_rename'
+enable_mirror ${CLUSTER2} ${POOL} ${image2}
+create_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_0"
+for i in `seq 1 20`; do
+ rename_snapshot ${CLUSTER2} ${POOL} ${image2} "${snap_name}_$(expr ${i} - 1)" "${snap_name}_${i}"
+done
+mirror_image_snapshot ${CLUSTER2} ${POOL} ${image2}
+wait_for_snap_present ${CLUSTER1} ${POOL} ${image2} "${snap_name}_${i}"
+
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap1'
+unprotect_snapshot ${CLUSTER2} ${POOL} ${image4} 'snap2'
+for i in ${image2} ${image4}; do
+ remove_image_retry ${CLUSTER2} ${POOL} ${i}
+done
+
+testlog "TEST: disable mirror while daemon is stopped"
+stop_mirrors ${CLUSTER1}
+stop_mirrors ${CLUSTER2}
+disable_mirror ${CLUSTER2} ${POOL} ${image}
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ test_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+fi
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+testlog "TEST: non-default namespace image mirroring"
+testlog " - replay"
+create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${image}
+create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${image}
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS2} ${image}
+write_image ${CLUSTER2} ${POOL}/${NS1} ${image} 100
+write_image ${CLUSTER2} ${POOL}/${NS2} ${image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${image} 'up+replaying'
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS2} ${image} 'up+replaying'
+compare_images ${POOL}/${NS1} ${image}
+compare_images ${POOL}/${NS2} ${image}
+
+testlog " - disable mirroring / delete image"
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${image}
+disable_mirror ${CLUSTER2} ${POOL}/${NS2} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS1} ${image} 'deleted'
+wait_for_image_present ${CLUSTER1} ${POOL}/${NS2} ${image} 'deleted'
+remove_image_retry ${CLUSTER2} ${POOL}/${NS2} ${image}
+
+testlog " - data pool"
+dp_image=test_data_pool
+create_image_and_enable_mirror ${CLUSTER2} ${POOL}/${NS1} ${dp_image} snapshot 128 --data-pool ${PARENT_POOL}
+data_pool=$(get_image_data_pool ${CLUSTER2} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+wait_for_image_replay_started ${CLUSTER1} ${POOL}/${NS1} ${dp_image}
+data_pool=$(get_image_data_pool ${CLUSTER1} ${POOL}/${NS1} ${dp_image})
+test "${data_pool}" = "${PARENT_POOL}"
+write_image ${CLUSTER2} ${POOL}/${NS1} ${dp_image} 100
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL}/${NS1} ${dp_image} 'up+replaying'
+compare_images ${POOL}/${NS1} ${dp_image}
+remove_image_retry ${CLUSTER2} ${POOL}/${NS1} ${dp_image}
+
+testlog "TEST: simple image resync"
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ testlog "TEST: image resync while replayer is stopped"
+ admin_daemons ${CLUSTER1} rbd mirror stop ${POOL}/${image}
+ wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+ request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+ admin_daemons ${CLUSTER1} rbd mirror start ${POOL}/${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+ compare_images ${POOL} ${image}
+fi
+
+testlog "TEST: request image resync while daemon is offline"
+stop_mirrors ${CLUSTER1}
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+start_mirrors ${CLUSTER1}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted' ${image_id}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+compare_images ${POOL} ${image}
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: split-brain"
+image=split-brain
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+promote_image ${CLUSTER1} ${POOL} ${image} --force
+wait_for_image_replay_stopped ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+stopped'
+write_image ${CLUSTER1} ${POOL} ${image} 10
+demote_image ${CLUSTER1} ${POOL} ${image}
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+error' 'split-brain'
+request_resync_image ${CLUSTER1} ${POOL} ${image} image_id
+wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+
+testlog "TEST: check if removed images' OMAP are removed"
+start_mirrors ${CLUSTER2}
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
+
+if [ -z "${RBD_MIRROR_USE_RBD_MIRROR}" ]; then
+ # teuthology will trash the daemon
+ testlog "TEST: no blocklists"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER1} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+ CEPH_ARGS='--id admin' ceph --cluster ${CLUSTER2} osd blocklist ls 2>&1 | grep -q "listed 0 entries"
+fi
diff --git a/qa/workunits/rbd/rbd_mirror_stress.sh b/qa/workunits/rbd/rbd_mirror_stress.sh
new file mode 100755
index 000000000..cb79aba7e
--- /dev/null
+++ b/qa/workunits/rbd/rbd_mirror_stress.sh
@@ -0,0 +1,221 @@
+#!/bin/sh -ex
+#
+# rbd_mirror_stress.sh - stress test rbd-mirror daemon
+#
+# The following additional environment variables affect the test:
+#
+# RBD_MIRROR_REDUCE_WRITES - if not empty, don't run the stress bench write
+# tool during the many-image test
+#
+
+IMAGE_COUNT=50
+export LOCKDEP=0
+
+. $(dirname $0)/rbd_mirror_helpers.sh
+
+setup
+
+create_snap()
+{
+ local cluster=$1
+ local pool=$2
+ local image=$3
+ local snap_name=$4
+
+ rbd --cluster ${cluster} -p ${pool} snap create ${image}@${snap_name} \
+ --debug-rbd=20 --debug-journaler=20 2> ${TEMPDIR}/rbd-snap-create.log
+}
+
+compare_image_snaps()
+{
+ local pool=$1
+ local image=$2
+ local snap_name=$3
+ local ret=0
+
+ local rmt_export=${TEMPDIR}/${CLUSTER2}-${pool}-${image}.export
+ local loc_export=${TEMPDIR}/${CLUSTER1}-${pool}-${image}.export
+
+ rm -f ${rmt_export} ${loc_export}
+ rbd --cluster ${CLUSTER2} -p ${pool} export ${image}@${snap_name} ${rmt_export}
+ rbd --cluster ${CLUSTER1} -p ${pool} export ${image}@${snap_name} ${loc_export}
+ if ! cmp ${rmt_export} ${loc_export}
+ then
+ show_diff ${rmt_export} ${loc_export}
+ ret=1
+ fi
+ rm -f ${rmt_export} ${loc_export}
+ return ${ret}
+}
+
+wait_for_pool_images()
+{
+ local cluster=$1
+ local pool=$2
+ local image_count=$3
+ local s
+ local count
+ local last_count=0
+
+ while true; do
+ for s in `seq 1 40`; do
+ test $s -ne 1 && sleep 30
+ count=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'images: ' | cut -d' ' -f 2)
+ test "${count}" = "${image_count}" && return 0
+
+ # reset timeout if making forward progress
+ test $count -ne $last_count && break
+ done
+
+ test $count -eq $last_count && break
+ last_count=$count
+ done
+ rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
+ return 1
+}
+
+wait_for_pool_healthy()
+{
+ local cluster=$1
+ local pool=$2
+ local s
+ local state
+
+ for s in `seq 1 40`; do
+ test $s -ne 1 && sleep 30
+ state=$(rbd --cluster ${cluster} -p ${pool} mirror pool status | grep 'image health:' | cut -d' ' -f 3)
+ test "${state}" = "ERROR" && break
+ test "${state}" = "OK" && return 0
+ done
+ rbd --cluster ${cluster} -p ${pool} mirror pool status --verbose >&2
+ return 1
+}
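+# Both helpers above scrape the plain-text summary of `rbd mirror pool status`,
+# which looks roughly like this (illustrative output):
+#
+#   health: OK
+#   daemon health: OK
+#   image health: OK
+#   images: 50 total
+#       50 replaying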
+
+start_mirrors ${CLUSTER1}
+start_mirrors ${CLUSTER2}
+
+testlog "TEST: add image and test replay after client crashes"
+image=test
+create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '512M'
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+
+clean_snap_name=
+for i in `seq 1 10`
+do
+ stress_write_image ${CLUSTER2} ${POOL} ${image}
+
+ wait_for_status_in_pool_dir ${CLUSTER1} ${POOL} ${image} 'up+replaying'
+
+ snap_name="snap${i}"
+ create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
+
+ if [ -n "${clean_snap_name}" ]; then
+ compare_image_snaps ${POOL} ${image} ${clean_snap_name}
+ fi
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+
+ clean_snap_name="snap${i}-clean"
+ create_snap ${CLUSTER2} ${POOL} ${image} ${clean_snap_name}
+done
+
+wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${clean_snap_name}
+
+for i in `seq 1 10`
+do
+ snap_name="snap${i}"
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+
+ snap_name="snap${i}-clean"
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+done
+
+for i in `seq 1 10`
+do
+ snap_name="snap${i}"
+ remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
+
+ snap_name="snap${i}-clean"
+ remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
+done
+
+remove_image_retry ${CLUSTER2} ${POOL} ${image}
+wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+
+testlog "TEST: create many images"
+snap_name="snap"
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ create_image_and_enable_mirror ${CLUSTER2} ${POOL} ${image} ${MIRROR_IMAGE_MODE} '128M'
+ if [ -n "${RBD_MIRROR_REDUCE_WRITES}" ]; then
+ write_image ${CLUSTER2} ${POOL} ${image} 100
+ else
+ stress_write_image ${CLUSTER2} ${POOL} ${image}
+ fi
+done
+
+wait_for_pool_images ${CLUSTER2} ${POOL} ${IMAGE_COUNT}
+wait_for_pool_healthy ${CLUSTER2} ${POOL}
+
+wait_for_pool_images ${CLUSTER1} ${POOL} ${IMAGE_COUNT}
+wait_for_pool_healthy ${CLUSTER1} ${POOL}
+
+testlog "TEST: compare many images"
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ create_snap ${CLUSTER2} ${POOL} ${image} ${snap_name}
+ wait_for_image_replay_started ${CLUSTER1} ${POOL} ${image}
+ wait_for_replay_complete ${CLUSTER1} ${CLUSTER2} ${POOL} ${image}
+ wait_for_snap_present ${CLUSTER1} ${POOL} ${image} ${snap_name}
+ compare_image_snaps ${POOL} ${image} ${snap_name}
+done
+
+testlog "TEST: delete many images"
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ remove_snapshot ${CLUSTER2} ${POOL} ${image} ${snap_name}
+ remove_image_retry ${CLUSTER2} ${POOL} ${image}
+done
+
+testlog "TEST: image deletions should propagate"
+wait_for_pool_images ${CLUSTER1} ${POOL} 0
+wait_for_pool_healthy ${CLUSTER1} ${POOL}
+for i in `seq 1 ${IMAGE_COUNT}`
+do
+ image="image_${i}"
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+done
+
+testlog "TEST: delete images during bootstrap"
+set_pool_mirror_mode ${CLUSTER1} ${POOL} 'image'
+set_pool_mirror_mode ${CLUSTER2} ${POOL} 'image'
+
+start_mirror ${CLUSTER1}
+
+for i in `seq 1 10`
+do
+ image="image_${i}"
+ create_image ${CLUSTER2} ${POOL} ${image} '512M'
+ enable_mirror ${CLUSTER2} ${POOL} ${image}
+
+ stress_write_image ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'present'
+
+ disable_mirror ${CLUSTER2} ${POOL} ${image}
+ wait_for_image_present ${CLUSTER1} ${POOL} ${image} 'deleted'
+ purge_snapshots ${CLUSTER2} ${POOL} ${image}
+ remove_image_retry ${CLUSTER2} ${POOL} ${image}
+done
+
+testlog "TEST: check if removed images' OMAP are removed"
+
+wait_for_image_in_omap ${CLUSTER1} ${POOL}
+wait_for_image_in_omap ${CLUSTER2} ${POOL}
diff --git a/qa/workunits/rbd/rbd_support_module_recovery.sh b/qa/workunits/rbd/rbd_support_module_recovery.sh
new file mode 100755
index 000000000..e9defced2
--- /dev/null
+++ b/qa/workunits/rbd/rbd_support_module_recovery.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+set -ex
+
+POOL=rbd
+IMAGE_PREFIX=image
+NUM_IMAGES=20
+RUN_TIME=3600
+
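+# Enable mirroring with a placeholder peer: no real secondary cluster takes
+# part in this test, but a peer must exist for mirror snapshots to be created
+# (the "dummy" peer spec is never contacted).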
+rbd mirror pool enable ${POOL} image
+rbd mirror pool peer add ${POOL} dummy
+
+# Create images and schedule their mirror snapshots
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ rbd create -s 1G --image-feature exclusive-lock ${POOL}/${IMAGE_PREFIX}$i
+ rbd mirror image enable ${POOL}/${IMAGE_PREFIX}$i snapshot
+ rbd mirror snapshot schedule add -p ${POOL} --image ${IMAGE_PREFIX}$i 1m
+done
+
+# Run fio workloads on images via kclient
+# Test the recovery of the rbd_support module and its scheduler from its
+# librbd client being blocklisted while an exclusive lock gets passed around
+# between that librbd client and a kclient trying to take mirror snapshots
+# and perform I/O on the same image.
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ DEVS[$i]=$(sudo rbd device map ${POOL}/${IMAGE_PREFIX}$i)
+ fio --name=fiotest --filename=${DEVS[$i]} --rw=randrw --bs=4K --direct=1 \
+ --ioengine=libaio --iodepth=2 --runtime=43200 --time_based \
+ &> /dev/null &
+done
+
+# Repeatedly blocklist the rbd_support module's client ~10s after the
+# module recovers from the previous blocklisting
+CURRENT_TIME=$(date +%s)
+END_TIME=$((CURRENT_TIME + RUN_TIME))
+PREV_CLIENT_ADDR=""
+CLIENT_ADDR=""
+while ((CURRENT_TIME <= END_TIME)); do
+ if [[ -n "${CLIENT_ADDR}" ]] &&
+ [[ "${CLIENT_ADDR}" != "${PREV_CLIENT_ADDR}" ]]; then
+ ceph osd blocklist add ${CLIENT_ADDR}
+ # Confirm rbd_support module's client is blocklisted
+ ceph osd blocklist ls | grep -q ${CLIENT_ADDR}
+ PREV_CLIENT_ADDR=${CLIENT_ADDR}
+ fi
+ sleep 10
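+    # the address is assembled as "addr/nonce" (e.g. something like
+    # "192.168.0.1:0/123456789"), the form "ceph osd blocklist add" accepts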
+ CLIENT_ADDR=$(ceph mgr dump |
+ jq .active_clients[] |
+ jq 'select(.name == "rbd_support")' |
+ jq -r '[.addrvec[0].addr, "/", .addrvec[0].nonce|tostring] | add')
+ CURRENT_TIME=$(date +%s)
+done
+
+# Confirm that rbd_support module recovered from repeated blocklisting
+# Check that you can add a mirror snapshot schedule after a few retries
+for ((i = 1; i <= 24; i++)); do
+ rbd mirror snapshot schedule add -p ${POOL} \
+ --image ${IMAGE_PREFIX}1 2m && break
+ sleep 10
+done
+rbd mirror snapshot schedule ls -p ${POOL} --image ${IMAGE_PREFIX}1 |
+ grep 'every 2m'
+# Verify that the schedule present before client blocklisting is preserved
+rbd mirror snapshot schedule ls -p ${POOL} --image ${IMAGE_PREFIX}1 |
+ grep 'every 1m'
+rbd mirror snapshot schedule rm -p ${POOL} --image ${IMAGE_PREFIX}1 2m
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ rbd mirror snapshot schedule rm -p ${POOL} --image ${IMAGE_PREFIX}$i 1m
+done
+
+# cleanup
+killall fio || true
+wait
+for ((i = 1; i <= ${NUM_IMAGES}; i++)); do
+ sudo rbd device unmap ${DEVS[$i]}
+done
+
+echo OK
diff --git a/qa/workunits/rbd/read-flags.sh b/qa/workunits/rbd/read-flags.sh
new file mode 100755
index 000000000..7d787ce67
--- /dev/null
+++ b/qa/workunits/rbd/read-flags.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -ex
+
+# create a snapshot, then export it and check that setting read flags works
+# by looking at --debug-ms output
+
+function clean_up {
+ rm -f test.log || true
+ rbd snap remove test@snap || true
+ rbd rm test || true
+}
+
+function test_read_flags {
+ local IMAGE=$1
+ local SET_BALANCED=$2
+ local SET_LOCALIZED=$3
+ local EXPECT_BALANCED=$4
+ local EXPECT_LOCALIZED=$5
+
+ local EXTRA_ARGS="--log-file test.log --debug-ms 1 --no-log-to-stderr"
+ if [ "$SET_BALANCED" = 'y' ]; then
+ EXTRA_ARGS="$EXTRA_ARGS --rbd-balance-snap-reads"
+ elif [ "$SET_LOCALIZED" = 'y' ]; then
+ EXTRA_ARGS="$EXTRA_ARGS --rbd-localize-snap-reads"
+ fi
+
+ rbd export $IMAGE - $EXTRA_ARGS > /dev/null
+ if [ "$EXPECT_BALANCED" = 'y' ]; then
+ grep -q balance_reads test.log
+ else
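+    # grep -L lists files that do NOT contain the pattern, so this
+    # asserts that balance_reads never appears in the log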
+ grep -L balance_reads test.log | grep -q test.log
+ fi
+ if [ "$EXPECT_LOCALIZED" = 'y' ]; then
+ grep -q localize_reads test.log
+ else
+ grep -L localize_reads test.log | grep -q test.log
+ fi
+    rm -f test.log
+}
+
+clean_up
+
+trap clean_up INT TERM EXIT
+
+rbd create --image-feature layering -s 10 test
+rbd snap create test@snap
+
+# exporting from a non-snapshot, with or without the settings, should not set flags
+test_read_flags test n n n n
+test_read_flags test y y n n
+
+# exporting from a snapshot should log the read flags if they are set
+test_read_flags test@snap n n n n
+test_read_flags test@snap y n y n
+test_read_flags test@snap n y n y
+
+# balance_reads happens to take priority over localize_reads when both are set
+test_read_flags test@snap y y y n
+
+echo OK
diff --git a/qa/workunits/rbd/simple_big.sh b/qa/workunits/rbd/simple_big.sh
new file mode 100755
index 000000000..70aafda4c
--- /dev/null
+++ b/qa/workunits/rbd/simple_big.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -ex
+
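+# $mb doubles as the image size in MB and the dd block count (1M blocks)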
+mb=100000
+
+rbd create foo --size $mb
+DEV=$(sudo rbd map foo)
+dd if=/dev/zero of=$DEV bs=1M count=$mb
+dd if=$DEV of=/dev/null bs=1M count=$mb
+sudo rbd unmap $DEV
+rbd rm foo
+
+echo OK
diff --git a/qa/workunits/rbd/test_admin_socket.sh b/qa/workunits/rbd/test_admin_socket.sh
new file mode 100755
index 000000000..6b960787b
--- /dev/null
+++ b/qa/workunits/rbd/test_admin_socket.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+set -ex
+
+TMPDIR=/tmp/rbd_test_admin_socket$$
+mkdir $TMPDIR
+trap "rm -fr $TMPDIR" 0
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
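+# ceph-helpers.sh supplies, among other things, the XMLSTARLET variable
+# used below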
+
+function expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+function rbd_watch_out_file()
+{
+ echo ${TMPDIR}/rbd_watch_$1.out
+}
+
+function rbd_watch_pid_file()
+{
+ echo ${TMPDIR}/rbd_watch_$1.pid
+}
+
+function rbd_watch_fifo()
+{
+ echo ${TMPDIR}/rbd_watch_$1.fifo
+}
+
+function rbd_watch_asok()
+{
+ echo ${TMPDIR}/rbd_watch_$1.asok
+}
+
+function rbd_get_perfcounter()
+{
+ local image=$1
+ local counter=$2
+ local name
+
+ name=$(ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) \
+ perf schema | $XMLSTARLET el -d3 |
+ grep "/librbd-.*-${image}/${counter}\$")
+ test -n "${name}" || return 1
+
+ ceph --format xml --admin-daemon $(rbd_watch_asok ${image}) perf dump |
+ $XMLSTARLET sel -t -m "${name}" -v .
+}
+
+function rbd_check_perfcounter()
+{
+ local image=$1
+ local counter=$2
+ local expected_val=$3
+ local val=
+
+ val=$(rbd_get_perfcounter ${image} ${counter})
+
+ test "${val}" -eq "${expected_val}"
+}
+
+function rbd_watch_start()
+{
+ local image=$1
+ local asok=$(rbd_watch_asok ${image})
+
+ mkfifo $(rbd_watch_fifo ${image})
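+    # keep the stdin of "rbd watch" attached to a fifo so that
+    # rbd_watch_end can terminate it by writing a newline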
+ (cat $(rbd_watch_fifo ${image}) |
+ rbd --admin-socket ${asok} watch ${image} \
+ > $(rbd_watch_out_file ${image}) 2>&1)&
+
+ # find pid of the started rbd watch process
+ local pid
+ for i in `seq 10`; do
+ pid=$(ps auxww | awk "/[r]bd --admin.* watch ${image}/ {print \$2}")
+ test -n "${pid}" && break
+ sleep 0.1
+ done
+ test -n "${pid}"
+ echo ${pid} > $(rbd_watch_pid_file ${image})
+
+ # find watcher admin socket
+ test -n "${asok}"
+ for i in `seq 10`; do
+ test -S "${asok}" && break
+ sleep 0.1
+ done
+ test -S "${asok}"
+
+ # configure debug level
+ ceph --admin-daemon "${asok}" config set debug_rbd 20
+
+ # check that watcher is registered
+ rbd status ${image} | expect_false grep "Watchers: none"
+}
+
+function rbd_watch_end()
+{
+ local image=$1
+ local regexp=$2
+
+ # send 'enter' to watch to exit
+ echo > $(rbd_watch_fifo ${image})
+ # just in case it is not terminated
+ kill $(cat $(rbd_watch_pid_file ${image})) || :
+
+ # output rbd watch out file for easier troubleshooting
+ cat $(rbd_watch_out_file ${image})
+
+ # cleanup
+ rm -f $(rbd_watch_fifo ${image}) $(rbd_watch_pid_file ${image}) \
+ $(rbd_watch_out_file ${image}) $(rbd_watch_asok ${image})
+}
+
+pool="rbd"
+image=testimg$$
+ceph_admin="ceph --admin-daemon $(rbd_watch_asok ${image})"
+
+rbd create --size 128 ${pool}/${image}
+
+# rbd cache commands that should be present in the admin socket help output
+rbd_cache_flush="rbd cache flush ${pool}/${image}"
+rbd_cache_invalidate="rbd cache invalidate ${pool}/${image}"
+
+rbd_watch_start ${image}
+${ceph_admin} help | fgrep "${rbd_cache_flush}"
+${ceph_admin} help | fgrep "${rbd_cache_invalidate}"
+rbd_watch_end ${image}
+
+# test rbd cache commands with disabled and enabled cache
+for conf_rbd_cache in false true; do
+
+ rbd image-meta set ${image} conf_rbd_cache ${conf_rbd_cache}
+
+ rbd_watch_start ${image}
+
+ rbd_check_perfcounter ${image} flush 0
+ ${ceph_admin} ${rbd_cache_flush}
+    # the 'flush' counter should increase regardless of whether the cache is enabled
+ rbd_check_perfcounter ${image} flush 1
+
+ rbd_check_perfcounter ${image} invalidate_cache 0
+ ${ceph_admin} ${rbd_cache_invalidate}
+    # the 'invalidate_cache' counter should increase regardless of whether the cache is enabled
+ rbd_check_perfcounter ${image} invalidate_cache 1
+
+ rbd_watch_end ${image}
+done
+
+rbd rm ${image}
diff --git a/qa/workunits/rbd/test_librbd.sh b/qa/workunits/rbd/test_librbd.sh
new file mode 100755
index 000000000..447306bb4
--- /dev/null
+++ b/qa/workunits/rbd/test_librbd.sh
@@ -0,0 +1,9 @@
+#!/bin/sh -e
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --error-exitcode=1 ceph_test_librbd
+else
+ ceph_test_librbd
+fi
+exit 0
diff --git a/qa/workunits/rbd/test_librbd_python.sh b/qa/workunits/rbd/test_librbd_python.sh
new file mode 100755
index 000000000..a33100829
--- /dev/null
+++ b/qa/workunits/rbd/test_librbd_python.sh
@@ -0,0 +1,12 @@
+#!/bin/sh -ex
+
+relpath=$(dirname $0)/../../../src/test/pybind
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --errors-for-leak-kinds=definite --error-exitcode=1 \
+ python3 -m pytest -v $relpath/test_rbd.py "$@"
+else
+ python3 -m pytest -v $relpath/test_rbd.py "$@"
+fi
+exit 0
diff --git a/qa/workunits/rbd/test_lock_fence.sh b/qa/workunits/rbd/test_lock_fence.sh
new file mode 100755
index 000000000..7cf2d21c5
--- /dev/null
+++ b/qa/workunits/rbd/test_lock_fence.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# can't use -e because of background process
+set -x
+
+IMAGE=rbdrw-image
+LOCKID=rbdrw
+RELPATH=$(dirname $0)/../../../src/test/librbd
+RBDRW=$RELPATH/rbdrw.py
+
+rbd create $IMAGE --size 10 --image-format 2 --image-shared || exit 1
+
+# rbdrw loops doing I/O to $IMAGE after locking with lockid $LOCKID
+python3 $RBDRW $IMAGE $LOCKID &
+iochild=$!
+
+# give client time to lock and start reading/writing
+LOCKS='[]'
+while [ "$LOCKS" == '[]' ]
+do
+ LOCKS=$(rbd lock list $IMAGE --format json)
+ sleep 1
+done
+
+clientaddr=$(rbd lock list $IMAGE | tail -1 | awk '{print $NF;}')
+clientid=$(rbd lock list $IMAGE | tail -1 | awk '{print $1;}')
+echo "clientaddr: $clientaddr"
+echo "clientid: $clientid"
+
+ceph osd blocklist add $clientaddr || exit 1
+
+wait $iochild
+rbdrw_exitcode=$?
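+# rbdrw exits with 108 (ESHUTDOWN) once its client has been blocklisted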
+if [ $rbdrw_exitcode != 108 ]
+then
+ echo "wrong exitcode from rbdrw: $rbdrw_exitcode"
+ exit 1
+else
+ echo "rbdrw stopped with ESHUTDOWN"
+fi
+
+set -e
+ceph osd blocklist rm $clientaddr
+rbd lock remove $IMAGE $LOCKID "$clientid"
+# rbdrw will have exited while still holding a watch, so, until #3527 is
+# fixed, wait here until the watch expires
+sleep 30
+rbd rm $IMAGE
+echo OK
diff --git a/qa/workunits/rbd/test_rbd_mirror.sh b/qa/workunits/rbd/test_rbd_mirror.sh
new file mode 100755
index 000000000..e139dd7e4
--- /dev/null
+++ b/qa/workunits/rbd/test_rbd_mirror.sh
@@ -0,0 +1,9 @@
+#!/bin/sh -e
+
+if [ -n "${VALGRIND}" ]; then
+ valgrind ${VALGRIND} --suppressions=${TESTDIR}/valgrind.supp \
+ --error-exitcode=1 ceph_test_rbd_mirror
+else
+ ceph_test_rbd_mirror
+fi
+exit 0
diff --git a/qa/workunits/rbd/test_rbd_tasks.sh b/qa/workunits/rbd/test_rbd_tasks.sh
new file mode 100755
index 000000000..b9663e601
--- /dev/null
+++ b/qa/workunits/rbd/test_rbd_tasks.sh
@@ -0,0 +1,276 @@
+#!/usr/bin/env bash
+set -ex
+
+POOL=rbd_tasks
+POOL_NS=ns1
+
+setup() {
+ trap 'cleanup' INT TERM EXIT
+
+ ceph osd pool create ${POOL} 128
+ rbd pool init ${POOL}
+ rbd namespace create ${POOL}/${POOL_NS}
+
+ TEMPDIR=`mktemp -d`
+}
+
+cleanup() {
+ ceph osd pool rm ${POOL} ${POOL} --yes-i-really-really-mean-it
+
+ rm -rf ${TEMPDIR}
+}
+
+wait_for() {
+ local TEST_FN=$1
+ shift 1
+ local TEST_FN_ARGS=("$@")
+
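+  # back off between retries, allowing up to ~100 seconds in total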
+ for s in 1 2 4 8 8 8 8 8 8 8 8 16 16; do
+ sleep ${s}
+
+ ${TEST_FN} "${TEST_FN_ARGS[@]}" || continue
+ return 0
+ done
+ return 1
+}
+
+task_exists() {
+ local TASK_ID=$1
+ [[ -z "${TASK_ID}" ]] && exit 1
+
+ ceph rbd task list ${TASK_ID} || return 1
+ return 0
+}
+
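+# "dne" = does not exist: succeeds only once the task is no longer listed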
+task_dne() {
+ local TASK_ID=$1
+ [[ -z "${TASK_ID}" ]] && exit 1
+
+ ceph rbd task list ${TASK_ID} || return 0
+ return 1
+}
+
+task_in_progress() {
+ local TASK_ID=$1
+ [[ -z "${TASK_ID}" ]] && exit 1
+
+ [[ $(ceph rbd task list ${TASK_ID} | jq '.in_progress') == 'true' ]]
+}
+
+test_remove() {
+ echo "test_remove"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${IMAGE}
+
+  # the MGR might need some time to discover the OSD map with the new pool
+ wait_for ceph rbd task add remove ${POOL}/${IMAGE}
+}
+
+test_flatten() {
+ echo "test_flatten"
+
+ local PARENT_IMAGE=`uuidgen`
+ local CHILD_IMAGE=`uuidgen`
+
+ rbd create --size 1 --image-shared ${POOL}/${PARENT_IMAGE}
+ rbd snap create ${POOL}/${PARENT_IMAGE}@snap
+ rbd clone ${POOL}/${PARENT_IMAGE}@snap ${POOL}/${POOL_NS}/${CHILD_IMAGE} --rbd-default-clone-format=2
+ [[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "true" ]]
+
+ local TASK_ID=`ceph rbd task add flatten ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd info --format json ${POOL}/${POOL_NS}/${CHILD_IMAGE} | jq 'has("parent")')" == "false" ]]
+}
+
+test_trash_remove() {
+ echo "test_trash_remove"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${IMAGE}
+ local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
+ rbd trash mv ${POOL}/${IMAGE}
+ [[ -n "$(rbd trash list ${POOL})" ]] || exit 1
+
+ local TASK_ID=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ -z "$(rbd trash list ${POOL})" ]] || exit 1
+}
+
+test_migration_execute() {
+ echo "test_migration_execute"
+
+ local SOURCE_IMAGE=`uuidgen`
+ local TARGET_IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
+ rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
+
+ local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "executed" ]]
+}
+
+test_migration_commit() {
+ echo "test_migration_commit"
+
+ local SOURCE_IMAGE=`uuidgen`
+ local TARGET_IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
+ rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
+
+ local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ TASK_ID=`ceph rbd task add migration commit ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq 'has("migration")')" == "false" ]]
+  # the source image should be gone after the commit; the previous
+  # "(rbd info ... && return 1) || true" construct could never fail
+  if rbd info ${POOL}/${SOURCE_IMAGE}; then return 1; fi
+ rbd info ${POOL}/${TARGET_IMAGE}
+}
+
+test_migration_abort() {
+ echo "test_migration_abort"
+
+ local SOURCE_IMAGE=`uuidgen`
+ local TARGET_IMAGE=`uuidgen`
+ rbd create --size 1 --image-shared ${POOL}/${SOURCE_IMAGE}
+ rbd migration prepare ${POOL}/${SOURCE_IMAGE} ${POOL}/${TARGET_IMAGE}
+ [[ "$(rbd status --format json ${POOL}/${TARGET_IMAGE} | jq --raw-output '.migration.state')" == "prepared" ]]
+
+ local TASK_ID=`ceph rbd task add migration execute ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ TASK_ID=`ceph rbd task add migration abort ${POOL}/${TARGET_IMAGE} | jq --raw-output ".id"`
+ wait_for task_dne ${TASK_ID}
+
+ [[ "$(rbd status --format json ${POOL}/${SOURCE_IMAGE} | jq 'has("migration")')" == "false" ]]
+ rbd info ${POOL}/${SOURCE_IMAGE}
+  # the target image should be gone after the abort
+  if rbd info ${POOL}/${TARGET_IMAGE}; then return 1; fi
+}
+
+test_list() {
+ echo "test_list"
+
+ local IMAGE_1=`uuidgen`
+ local IMAGE_2=`uuidgen`
+
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE_1}
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
+
+ local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
+ local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
+
+ local LIST_FILE="${TEMPDIR}/list_file"
+ ceph rbd task list > ${LIST_FILE}
+ cat ${LIST_FILE}
+
+ [[ $(jq "[.[] | .id] | contains([\"${TASK_ID_1}\", \"${TASK_ID_2}\"])" ${LIST_FILE}) == "true" ]]
+
+ ceph rbd task cancel ${TASK_ID_1}
+ ceph rbd task cancel ${TASK_ID_2}
+}
+
+test_cancel() {
+ echo "test_cancel"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE}
+ local TASK_ID=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ wait_for task_exists ${TASK_ID}
+
+ ceph rbd task cancel ${TASK_ID}
+ wait_for task_dne ${TASK_ID}
+}
+
+test_duplicate_task() {
+ echo "test_duplicate_task"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE}
+ local IMAGE_ID=`rbd info --format json ${POOL}/${IMAGE} | jq --raw-output ".id"`
+ rbd trash mv ${POOL}/${IMAGE}
+
+ local TASK_ID_1=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
+ local TASK_ID_2=`ceph rbd task add trash remove ${POOL}/${IMAGE_ID} | jq --raw-output ".id"`
+
+ [[ "${TASK_ID_1}" == "${TASK_ID_2}" ]]
+
+ ceph rbd task cancel ${TASK_ID_1}
+}
+
+test_duplicate_name() {
+ echo "test_duplicate_name"
+
+ local IMAGE=`uuidgen`
+ rbd create --size 1G --image-shared ${POOL}/${IMAGE}
+ local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ wait_for task_dne ${TASK_ID_1}
+
+ rbd create --size 1G --image-shared ${POOL}/${IMAGE}
+ local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ [[ "${TASK_ID_1}" != "${TASK_ID_2}" ]]
+ wait_for task_dne ${TASK_ID_2}
+
+ local TASK_ID_3=`ceph rbd task add remove ${POOL}/${IMAGE} | jq --raw-output ".id"`
+
+ [[ "${TASK_ID_2}" == "${TASK_ID_3}" ]]
+}
+
+test_progress() {
+ echo "test_progress"
+
+ local IMAGE_1=`uuidgen`
+ local IMAGE_2=`uuidgen`
+
+ rbd create --size 1 --image-shared ${POOL}/${IMAGE_1}
+ local TASK_ID_1=`ceph rbd task add remove ${POOL}/${IMAGE_1} | jq --raw-output ".id"`
+
+ wait_for task_dne ${TASK_ID_1}
+
+ local PROGRESS_FILE="${TEMPDIR}/progress_file"
+ ceph progress json > ${PROGRESS_FILE}
+ cat ${PROGRESS_FILE}
+
+ [[ $(jq "[.completed | .[].id] | contains([\"${TASK_ID_1}\"])" ${PROGRESS_FILE}) == "true" ]]
+
+ rbd create --size 1T --image-shared ${POOL}/${IMAGE_2}
+ local TASK_ID_2=`ceph rbd task add remove ${POOL}/${IMAGE_2} | jq --raw-output ".id"`
+
+ wait_for task_in_progress ${TASK_ID_2}
+ ceph progress json > ${PROGRESS_FILE}
+ cat ${PROGRESS_FILE}
+
+ [[ $(jq "[.events | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
+
+ ceph rbd task cancel ${TASK_ID_2}
+ wait_for task_dne ${TASK_ID_2}
+
+ ceph progress json > ${PROGRESS_FILE}
+ cat ${PROGRESS_FILE}
+
+ [[ $(jq "[.completed | map(select(.failed)) | .[].id] | contains([\"${TASK_ID_2}\"])" ${PROGRESS_FILE}) == "true" ]]
+}
+
+setup
+test_remove
+test_flatten
+test_trash_remove
+test_migration_execute
+test_migration_commit
+test_migration_abort
+test_list
+test_cancel
+test_duplicate_task
+test_duplicate_name
+test_progress
+
+echo OK
diff --git a/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh b/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
new file mode 100755
index 000000000..501c69cd5
--- /dev/null
+++ b/qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Regression test for http://tracker.ceph.com/issues/14984
+#
+# When the bug is present, starting the rbdmap service emits a bogus
+# log message because the RBDMAPFILE environment variable is not set.
+#
+# When the bug is not present, starting the rbdmap service will emit
+# no log messages, because /etc/ceph/rbdmap does not contain any lines
+# that require processing.
+#
+set -ex
+
+echo "TEST: save timestamp for use later with journalctl --since"
+TIMESTAMP=$(date +%Y-%m-%d\ %H:%M:%S)
+
+echo "TEST: assert that rbdmap has not logged anything since boot"
+journalctl -b 0 -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+journalctl -b 0 -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+
+echo "TEST: restart the rbdmap.service"
+sudo systemctl restart rbdmap.service
+
+echo "TEST: ensure that /usr/bin/rbdmap runs to completion"
+until sudo systemctl status rbdmap.service | grep 'active (exited)' ; do
+ sleep 0.5
+done
+
+echo "TEST: assert that rbdmap has not logged anything since TIMESTAMP"
+journalctl --since "$TIMESTAMP" -t rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+journalctl --since "$TIMESTAMP" -t init-rbdmap | grep 'rbdmap\[[[:digit:]]' && exit 1
+
+exit 0
diff --git a/qa/workunits/rbd/verify_pool.sh b/qa/workunits/rbd/verify_pool.sh
new file mode 100755
index 000000000..08bcca506
--- /dev/null
+++ b/qa/workunits/rbd/verify_pool.sh
@@ -0,0 +1,27 @@
+#!/bin/sh -ex
+
+POOL_NAME=rbd_test_validate_pool
+PG_NUM=32
+
+tear_down () {
+ ceph osd pool delete $POOL_NAME $POOL_NAME --yes-i-really-really-mean-it || true
+}
+
+set_up () {
+ tear_down
+ ceph osd pool create $POOL_NAME $PG_NUM
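+  # a pool-managed snapshot precludes self-managed snapshots, which RBD
+  # image creation requires (exercised below)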
+ ceph osd pool mksnap $POOL_NAME snap
+ rbd pool init $POOL_NAME
+}
+
+trap tear_down EXIT HUP INT
+set_up
+
+# creating an image in a pool-managed snapshot pool should fail
+rbd create --pool $POOL_NAME --size 1 foo && exit 1 || true
+
+# should succeed if the pool already marked as validated
+printf "overwrite validated" | rados --pool $POOL_NAME put rbd_info -
+rbd create --pool $POOL_NAME --size 1 foo
+
+echo OK