author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /qa/workunits/rados
parent     Initial commit. (diff)
download   ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.tar.xz
           ceph-483eb2f56657e8e7f419ab1a4fab8dce9ade8609.zip

Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/workunits/rados')
-rwxr-xr-x  qa/workunits/rados/clone.sh                         |  13
-rwxr-xr-x  qa/workunits/rados/load-gen-big.sh                  |  10
-rwxr-xr-x  qa/workunits/rados/load-gen-mix-small-long.sh       |  10
-rwxr-xr-x  qa/workunits/rados/load-gen-mix-small.sh            |  10
-rwxr-xr-x  qa/workunits/rados/load-gen-mix.sh                  |  10
-rwxr-xr-x  qa/workunits/rados/load-gen-mostlyread.sh           |  10
-rwxr-xr-x  qa/workunits/rados/stress_watch.sh                  |   7
-rwxr-xr-x  qa/workunits/rados/test.sh                          |  61
-rwxr-xr-x  qa/workunits/rados/test_alloc_hint.sh               | 177
-rwxr-xr-x  qa/workunits/rados/test_cache_pool.sh               | 170
-rwxr-xr-x  qa/workunits/rados/test_crash.sh                    |  39
-rwxr-xr-x  qa/workunits/rados/test_dedup_tool.sh               | 156
-rwxr-xr-x  qa/workunits/rados/test_envlibrados_for_rocksdb.sh  |  94
-rwxr-xr-x  qa/workunits/rados/test_hang.sh                     |   8
-rwxr-xr-x  qa/workunits/rados/test_health_warnings.sh          |  76
-rwxr-xr-x  qa/workunits/rados/test_large_omap_detection.py     | 134
-rwxr-xr-x  qa/workunits/rados/test_librados_build.sh           |  74
-rwxr-xr-x  qa/workunits/rados/test_pool_access.sh              | 108
-rwxr-xr-x  qa/workunits/rados/test_pool_quota.sh               |  68
-rwxr-xr-x  qa/workunits/rados/test_python.sh                   |   4
-rwxr-xr-x  qa/workunits/rados/test_rados_timeouts.sh           |  48
-rwxr-xr-x  qa/workunits/rados/test_rados_tool.sh               | 924
22 files changed, 2211 insertions, 0 deletions
diff --git a/qa/workunits/rados/clone.sh b/qa/workunits/rados/clone.sh
new file mode 100755
index 00000000..281e89f7
--- /dev/null
+++ b/qa/workunits/rados/clone.sh
@@ -0,0 +1,13 @@
+#!/bin/sh -x
+
+set -e
+
+rados -p data rm foo || true
+rados -p data put foo.tmp /etc/passwd --object-locator foo
+rados -p data clonedata foo.tmp foo --object-locator foo
+rados -p data get foo /tmp/foo
+cmp /tmp/foo /etc/passwd
+rados -p data rm foo.tmp --object-locator foo
+rados -p data rm foo
+
+echo OK
\ No newline at end of file
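
A note on the locator usage above: `clonedata` requires source and destination to hash to the same placement group, which is why `foo.tmp` is written with `--object-locator foo`. A minimal sketch of the rule that the locator must accompany every subsequent op on such an object (hypothetical object name, `data` pool assumed to exist):

    rados -p data put a.tmp /etc/hosts --object-locator foo   # placed as if named "foo"
    rados -p data stat a.tmp --object-locator foo             # found only with the same locator
    rados -p data rm a.tmp --object-locator foo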
diff --git a/qa/workunits/rados/load-gen-big.sh b/qa/workunits/rados/load-gen-big.sh
new file mode 100755
index 00000000..6715658e
--- /dev/null
+++ b/qa/workunits/rados/load-gen-big.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 10240 \
+ --min-object-size 1048576 \
+ --max-object-size 25600000 \
+ --max-ops 1024 \
+ --max-backlog 1024 \
+ --read-percent 50 \
+ --run-length 1200
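
The same knobs scale down for a quick local smoke test; a sketch with arbitrary small values (object sizes are in bytes, run length in seconds, `rbd` pool assumed):

    rados -p rbd load-gen \
        --num-objects 16 \
        --min-object-size 4096 \
        --max-object-size 65536 \
        --max-ops 8 \
        --max-backlog 8 \
        --read-percent 50 \
        --run-length 30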
diff --git a/qa/workunits/rados/load-gen-mix-small-long.sh b/qa/workunits/rados/load-gen-mix-small-long.sh
new file mode 100755
index 00000000..593bad51
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mix-small-long.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 1024 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 50 \
+ --run-length 1800
diff --git a/qa/workunits/rados/load-gen-mix-small.sh b/qa/workunits/rados/load-gen-mix-small.sh
new file mode 100755
index 00000000..02db77bd
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mix-small.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 1024 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 50 \
+ --run-length 600
diff --git a/qa/workunits/rados/load-gen-mix.sh b/qa/workunits/rados/load-gen-mix.sh
new file mode 100755
index 00000000..ad3b4be8
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mix.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 10240 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 50 \
+ --run-length 600
diff --git a/qa/workunits/rados/load-gen-mostlyread.sh b/qa/workunits/rados/load-gen-mostlyread.sh
new file mode 100755
index 00000000..236f82dd
--- /dev/null
+++ b/qa/workunits/rados/load-gen-mostlyread.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+rados -p rbd load-gen \
+ --num-objects 51200 \
+ --min-object-size 1 \
+ --max-object-size 1048576 \
+ --max-ops 128 \
+ --max-backlog 128 \
+ --read-percent 90 \
+ --run-length 600
diff --git a/qa/workunits/rados/stress_watch.sh b/qa/workunits/rados/stress_watch.sh
new file mode 100755
index 00000000..49f144bb
--- /dev/null
+++ b/qa/workunits/rados/stress_watch.sh
@@ -0,0 +1,7 @@
+#!/bin/sh -e
+
+ceph_test_stress_watch
+ceph_multi_stress_watch rep reppool repobj
+ceph_multi_stress_watch ec ecpool ecobj
+
+exit 0
diff --git a/qa/workunits/rados/test.sh b/qa/workunits/rados/test.sh
new file mode 100755
index 00000000..a0b2aed5
--- /dev/null
+++ b/qa/workunits/rados/test.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -ex
+
+parallel=1
+[ "$1" = "--serial" ] && parallel=0
+
+color=""
+[ -t 1 ] && color="--gtest_color=yes"
+
+function cleanup() {
+ pkill -P $$ || true
+}
+trap cleanup EXIT ERR HUP INT QUIT
+
+declare -A pids
+
+for f in \
+ api_aio api_aio_pp \
+ api_io api_io_pp \
+ api_asio api_list \
+ api_lock api_lock_pp \
+ api_misc api_misc_pp \
+ api_tier_pp \
+ api_pool \
+ api_snapshots api_snapshots_pp \
+ api_stat api_stat_pp \
+ api_watch_notify api_watch_notify_pp \
+ api_cmd api_cmd_pp \
+ api_service api_service_pp \
+ api_c_write_operations \
+ api_c_read_operations \
+ list_parallel \
+ open_pools_parallel \
+ delete_pools_parallel
+do
+ if [ $parallel -eq 1 ]; then
+ r=`printf '%25s' $f`
+ ff=`echo $f | awk '{print $1}'`
+ bash -o pipefail -exc "ceph_test_rados_$f $color 2>&1 | tee ceph_test_rados_$ff.log | sed \"s/^/$r: /\"" &
+ pid=$!
+ echo "test $f on pid $pid"
+ pids[$f]=$pid
+ else
+ ceph_test_rados_$f
+ fi
+done
+
+ret=0
+if [ $parallel -eq 1 ]; then
+for t in "${!pids[@]}"
+do
+ pid=${pids[$t]}
+ if ! wait $pid
+ then
+ echo "error in $t ($pid)"
+ ret=1
+ fi
+done
+fi
+
+exit $ret
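
The fan-out/collect idiom used here — launch each test under `bash -o pipefail`, record PIDs in an associative array, then `wait` on each — generalizes to any job set; a minimal standalone sketch:

    #!/usr/bin/env bash
    declare -A pids
    for job in a b c; do
        bash -c "sleep 1; echo job $job done" &   # stand-in for ceph_test_rados_$job
        pids[$job]=$!
    done
    ret=0
    for job in "${!pids[@]}"; do
        wait "${pids[$job]}" || { echo "error in $job"; ret=1; }
    done
    exit $ret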
diff --git a/qa/workunits/rados/test_alloc_hint.sh b/qa/workunits/rados/test_alloc_hint.sh
new file mode 100755
index 00000000..2323915f
--- /dev/null
+++ b/qa/workunits/rados/test_alloc_hint.sh
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+
+set -ex
+shopt -s nullglob # fns glob expansion in expect_alloc_hint_eq()
+
+#
+# Helpers
+#
+
+function get_xml_val() {
+ local xml="$1"
+ local tag="$2"
+
+ local regex=".*<${tag}>(.*)</${tag}>.*"
+ if [[ ! "${xml}" =~ ${regex} ]]; then
+ echo "'${xml}' xml doesn't match '${tag}' tag regex" >&2
+ return 2
+ fi
+
+ echo "${BASH_REMATCH[1]}"
+}
+
+function get_conf_val() {
+ set -e
+
+ local entity="$1"
+ local option="$2"
+
+ local val
+ val="$(sudo ceph daemon "${entity}" config get --format=xml "${option}")"
+ val="$(get_xml_val "${val}" "${option}")"
+
+ echo "${val}"
+}
+
+function setup_osd_data() {
+ for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
+ OSD_DATA[i]="$(get_conf_val "osd.$i" "osd_data")"
+ done
+}
+
+function setup_pgid() {
+ local poolname="$1"
+ local objname="$2"
+
+ local pgid
+ pgid="$(ceph osd map "${poolname}" "${objname}" --format=xml)"
+ pgid="$(get_xml_val "${pgid}" "pgid")"
+
+ PGID="${pgid}"
+}
+
+function expect_alloc_hint_eq() {
+ export CEPH_ARGS="--osd-objectstore=filestore"
+ local expected_extsize="$1"
+
+ for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do
+ # Make sure that stuff is flushed from the journal to the store
+ # by the time we get to it, as we prod the actual files and not
+ # the journal.
+ sudo ceph daemon "osd.${i}" "flush_journal"
+
+ # e.g., .../25.6_head/foo__head_7FC1F406__19
+ # .../26.bs1_head/bar__head_EFE6384B__1a_ffffffffffffffff_1
+        local fns=($(sudo sh -c "ls ${OSD_DATA[i]}/current/${PGID}*_head/${OBJ}_*"))
+ local count="${#fns[@]}"
+ if [ "${count}" -ne 1 ]; then
+ echo "bad fns count: ${count}" >&2
+ return 2
+ fi
+
+ local extsize
+ extsize="$(sudo xfs_io -c extsize "${fns[0]}")"
+ local extsize_regex="^\[(.*)\] ${fns[0]}$"
+ if [[ ! "${extsize}" =~ ${extsize_regex} ]]; then
+ echo "extsize doesn't match extsize_regex: ${extsize}" >&2
+ return 2
+ fi
+ extsize="${BASH_REMATCH[1]}"
+
+ if [ "${extsize}" -ne "${expected_extsize}" ]; then
+ echo "FAIL: alloc_hint: actual ${extsize}, expected ${expected_extsize}" >&2
+ return 1
+ fi
+ done
+}
+
+#
+# Global setup
+#
+
+EC_K="2"
+EC_M="1"
+NUM_OSDS="$((EC_K + EC_M))"
+
+NUM_PG="12"
+NUM_PGP="${NUM_PG}"
+
+LOW_CAP="$(get_conf_val "osd.0" "filestore_max_alloc_hint_size")"
+HIGH_CAP="$((LOW_CAP * 10))" # 10M, assuming 1M default cap
+SMALL_HINT="$((LOW_CAP / 4))" # 256K, assuming 1M default cap
+BIG_HINT="$((LOW_CAP * 6))" # 6M, assuming 1M default cap
+
+setup_osd_data
+
+#
+# ReplicatedBackend tests
+#
+
+POOL="alloc_hint-rep"
+ceph osd pool create "${POOL}" "${NUM_PG}"
+ceph osd pool set "${POOL}" size "${NUM_OSDS}"
+ceph osd pool application enable "${POOL}" rados
+
+OBJ="foo"
+setup_pgid "${POOL}" "${OBJ}"
+rados -p "${POOL}" create "${OBJ}"
+
+# Empty object, SMALL_HINT - expect SMALL_HINT
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "${SMALL_HINT}"
+
+# Try changing to BIG_HINT (1) - expect LOW_CAP (BIG_HINT > LOW_CAP)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
+expect_alloc_hint_eq "${LOW_CAP}"
+
+# Bump the cap to HIGH_CAP
+ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${HIGH_CAP}"
+
+# Try changing to BIG_HINT (2) - expect BIG_HINT (BIG_HINT < HIGH_CAP)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}"
+expect_alloc_hint_eq "${BIG_HINT}"
+
+ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${LOW_CAP}"
+
+# Populate object with some data
+rados -p "${POOL}" put "${OBJ}" /etc/passwd
+
+# Try changing back to SMALL_HINT - expect BIG_HINT (non-empty object)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "${BIG_HINT}"
+
+OBJ="bar"
+setup_pgid "${POOL}" "${OBJ}"
+
+# Non-existent object, SMALL_HINT - expect SMALL_HINT (object creation)
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "${SMALL_HINT}"
+
+ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
+
+#
+# ECBackend tests
+#
+
+PROFILE="alloc_hint-ecprofile"
+POOL="alloc_hint-ec"
+ceph osd erasure-code-profile set "${PROFILE}" k=2 m=1 crush-failure-domain=osd
+ceph osd erasure-code-profile get "${PROFILE}" # just so it's logged
+ceph osd pool create "${POOL}" "${NUM_PG}" "${NUM_PGP}" erasure "${PROFILE}"
+ceph osd pool application enable "${POOL}" rados
+
+OBJ="baz"
+setup_pgid "${POOL}" "${OBJ}"
+rados -p "${POOL}" create "${OBJ}"
+
+# Empty object, SMALL_HINT - expect scaled-down SMALL_HINT
+rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}"
+expect_alloc_hint_eq "$((SMALL_HINT / EC_K))"
+
+ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it
+
+#
+# Global teardown
+#
+
+echo "OK"
diff --git a/qa/workunits/rados/test_cache_pool.sh b/qa/workunits/rados/test_cache_pool.sh
new file mode 100755
index 00000000..5e28b355
--- /dev/null
+++ b/qa/workunits/rados/test_cache_pool.sh
@@ -0,0 +1,170 @@
+#!/usr/bin/env bash
+
+set -ex
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+# create pools, set up tier relationship
+ceph osd pool create base_pool 2
+ceph osd pool application enable base_pool rados
+ceph osd pool create partial_wrong 2
+ceph osd pool create wrong_cache 2
+ceph osd tier add base_pool partial_wrong
+ceph osd tier add base_pool wrong_cache
+
+# populate base_pool with some data
+echo "foo" > foo.txt
+echo "bar" > bar.txt
+echo "baz" > baz.txt
+rados -p base_pool put fooobj foo.txt
+rados -p base_pool put barobj bar.txt
+# fill in wrong_cache backwards so we can tell we read from it
+rados -p wrong_cache put fooobj bar.txt
+rados -p wrong_cache put barobj foo.txt
+# partial_wrong gets barobj backwards so we can check promote and non-promote
+rados -p partial_wrong put barobj foo.txt
+
+# get the objects back before setting a caching pool
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt foo.txt
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt bar.txt
+
+# set up redirect and make sure we get backwards results
+ceph osd tier set-overlay base_pool wrong_cache
+ceph osd tier cache-mode wrong_cache writeback
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt bar.txt
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt foo.txt
+
+# switch cache pools and make sure we're doing promote
+ceph osd tier remove-overlay base_pool
+ceph osd tier set-overlay base_pool partial_wrong
+ceph osd tier cache-mode partial_wrong writeback
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt foo.txt # hurray, it promoted!
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt foo.txt # yep, we read partial_wrong's local object!
+
+# try a nonexistent object and make sure we get an error
+expect_false rados -p base_pool get bazobj tmp.txt
+
+# drop the cache entirely and make sure contents are still the same
+ceph osd tier remove-overlay base_pool
+rados -p base_pool get fooobj tmp.txt
+diff -q tmp.txt foo.txt
+rados -p base_pool get barobj tmp.txt
+diff -q tmp.txt bar.txt
+
+# create an empty cache pool and make sure it has objects after reading
+ceph osd pool create empty_cache 2
+
+touch empty.txt
+rados -p empty_cache ls > tmp.txt
+diff -q tmp.txt empty.txt
+
+ceph osd tier add base_pool empty_cache
+ceph osd tier set-overlay base_pool empty_cache
+ceph osd tier cache-mode empty_cache writeback
+rados -p base_pool get fooobj tmp.txt
+rados -p base_pool get barobj tmp.txt
+expect_false rados -p base_pool get bazobj tmp.txt
+
+rados -p empty_cache ls > tmp.txt
+expect_false diff -q tmp.txt empty.txt
+
+# cleanup
+ceph osd tier remove-overlay base_pool
+ceph osd tier remove base_pool wrong_cache
+ceph osd tier remove base_pool partial_wrong
+ceph osd tier remove base_pool empty_cache
+ceph osd pool delete base_pool base_pool --yes-i-really-really-mean-it
+ceph osd pool delete empty_cache empty_cache --yes-i-really-really-mean-it
+ceph osd pool delete wrong_cache wrong_cache --yes-i-really-really-mean-it
+ceph osd pool delete partial_wrong partial_wrong --yes-i-really-really-mean-it
+
+## set of base, cache
+ceph osd pool create base 8
+ceph osd pool application enable base rados
+ceph osd pool create cache 8
+
+ceph osd tier add base cache
+ceph osd tier cache-mode cache writeback
+ceph osd tier set-overlay base cache
+
+# cache-flush, cache-evict
+rados -p base put foo /etc/passwd
+expect_false rados -p base cache-evict foo
+expect_false rados -p base cache-flush foo
+expect_false rados -p cache cache-evict foo
+rados -p cache cache-flush foo
+rados -p cache cache-evict foo
+rados -p cache ls - | wc -l | grep 0
+
+# cache-try-flush, cache-evict
+rados -p base put foo /etc/passwd
+expect_false rados -p base cache-evict foo
+expect_false rados -p base cache-flush foo
+expect_false rados -p cache cache-evict foo
+rados -p cache cache-try-flush foo
+rados -p cache cache-evict foo
+rados -p cache ls - | wc -l | grep 0
+
+# cache-flush-evict-all
+rados -p base put bar /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+expect_false rados -p base cache-flush-evict-all
+rados -p cache cache-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
+# cache-try-flush-evict-all
+rados -p base put bar /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+expect_false rados -p base cache-flush-evict-all
+rados -p cache cache-try-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
+# cache flush/evict when clone objects exist
+rados -p base put testclone /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+ceph osd pool mksnap base snap
+rados -p base put testclone /etc/hosts
+rados -p cache cache-flush-evict-all
+rados -p cache ls - | wc -l | grep 0
+
+ceph osd tier cache-mode cache forward --yes-i-really-mean-it
+rados -p base -s snap get testclone testclone.txt
+diff -q testclone.txt /etc/passwd
+rados -p base get testclone testclone.txt
+diff -q testclone.txt /etc/hosts
+
+# test --with-clones option
+ceph osd tier cache-mode cache writeback
+rados -p base put testclone2 /etc/passwd
+rados -p cache ls - | wc -l | grep 1
+ceph osd pool mksnap base snap1
+rados -p base put testclone2 /etc/hosts
+expect_false rados -p cache cache-flush testclone2
+rados -p cache cache-flush testclone2 --with-clones
+expect_false rados -p cache cache-evict testclone2
+rados -p cache cache-evict testclone2 --with-clones
+rados -p cache ls - | wc -l | grep 0
+
+rados -p base -s snap1 get testclone2 testclone2.txt
+diff -q testclone2.txt /etc/passwd
+rados -p base get testclone2 testclone2.txt
+diff -q testclone2.txt /etc/hosts
+
+# cleanup
+ceph osd tier remove-overlay base
+ceph osd tier remove base cache
+
+ceph osd pool delete cache cache --yes-i-really-really-mean-it
+ceph osd pool delete base base --yes-i-really-really-mean-it
+
+echo OK
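
The tier lifecycle exercised throughout follows a fixed order — attach, set cache mode, set overlay — and the exact reverse on teardown; a condensed sketch with a hypothetical `hot` pool layered over an existing `base` pool:

    ceph osd pool create hot 8
    ceph osd tier add base hot             # attach hot as a tier of base
    ceph osd tier cache-mode hot writeback
    ceph osd tier set-overlay base hot     # route client I/O through hot
    # ... exercise the pools ...
    ceph osd tier remove-overlay base
    ceph osd tier remove base hot
    ceph osd pool delete hot hot --yes-i-really-really-mean-it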
diff --git a/qa/workunits/rados/test_crash.sh b/qa/workunits/rados/test_crash.sh
new file mode 100755
index 00000000..6608d787
--- /dev/null
+++ b/qa/workunits/rados/test_crash.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+
+set -x
+
+# run on a single-node three-OSD cluster
+
+sudo killall -ABRT ceph-osd
+sleep 5
+
+# the kill above caused coredumps; find and delete them, carefully, so
+# as not to disturb other coredumps, or else teuthology will see them
+# and assume test failure. sudo is needed because the core files are
+# owned by root with mode 600
+for f in $(find $TESTDIR/archive/coredump -type f); do
+ gdb_output=$(echo "quit" | sudo gdb /usr/bin/ceph-osd $f)
+    if expr match "$gdb_output" ".*generated.*ceph-osd.*" && \
+       ( \
+        expr match "$gdb_output" ".*terminated.*signal 6.*" || \
+        expr match "$gdb_output" ".*terminated.*signal SIGABRT.*" \
+       )
+ then
+ sudo rm $f
+ fi
+done
+
+# let daemon find crashdumps on startup
+sudo systemctl restart ceph-crash
+sleep 30
+
+# must be 3 crashdumps registered and moved to crash/posted
+[ $(ceph crash ls | wc -l) = 4 ] || exit 1   # 4 here because of the table header
+[ $(sudo find /var/lib/ceph/crash/posted/ -name meta | wc -l) = 3 ] || exit 1
+
+# there should be a health warning
+ceph health detail | grep RECENT_CRASH || exit 1
+ceph crash archive-all
+sleep 30
+ceph health detail | grep -c RECENT_CRASH | grep 0 # should be gone!
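
Piping `quit` into an interactive gdb works but is fragile; batch mode is the more conventional spelling and should print the same banner text the `expr match` checks look for (a sketch, not verified against every gdb version):

    gdb_output=$(sudo gdb --batch /usr/bin/ceph-osd "$f" 2>&1)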
diff --git a/qa/workunits/rados/test_dedup_tool.sh b/qa/workunits/rados/test_dedup_tool.sh
new file mode 100755
index 00000000..25994401
--- /dev/null
+++ b/qa/workunits/rados/test_dedup_tool.sh
@@ -0,0 +1,156 @@
+#!/usr/bin/env bash
+
+set -x
+
+die() {
+ echo "$@"
+ exit 1
+}
+
+do_run() {
+ if [ "$1" == "--tee" ]; then
+ shift
+ tee_out="$1"
+ shift
+ "$@" | tee $tee_out
+ else
+ "$@"
+ fi
+}
+
+run_expect_succ() {
+ echo "RUN_EXPECT_SUCC: " "$@"
+ do_run "$@"
+ [ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
+}
+
+run() {
+ echo "RUN: " $@
+ do_run "$@"
+}
+
+if [ -n "$CEPH_BIN" ] ; then
+ # CMake env
+ RADOS_TOOL="$CEPH_BIN/rados"
+ CEPH_TOOL="$CEPH_BIN/ceph"
+ DEDUP_TOOL="$CEPH_BIN/cephdeduptool"
+else
+ # executables should be installed by the QA env
+ RADOS_TOOL=$(which rados)
+ CEPH_TOOL=$(which ceph)
+ DEDUP_TOOL=$(which cephdeduptool)
+fi
+
+POOL=dedup_pool
+OBJ=test_rados_obj
+
+[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
+[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
+
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+
+function test_dedup_ratio_fixed()
+{
+ # case 1
+ dd if=/dev/urandom of=dedup_object_1k bs=1K count=1
+ dd if=dedup_object_1k of=dedup_object_100k bs=1K count=100
+
+ $RADOS_TOOL -p $POOL put $OBJ ./dedup_object_100k
+ RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 1024 --chunk-algorithm fixed --fingerprint-algorithm sha1 --debug | grep result | awk '{print$4}')
+ if [ 1024 -ne $RESULT ];
+ then
+ die "Estimate failed expecting 1024 result $RESULT"
+ fi
+
+ # case 2
+ dd if=/dev/zero of=dedup_object_10m bs=10M count=1
+
+ $RADOS_TOOL -p $POOL put $OBJ ./dedup_object_10m
+ RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 --debug | grep result | awk '{print$4}')
+ if [ 4096 -ne $RESULT ];
+ then
+ die "Estimate failed expecting 4096 result $RESULT"
+ fi
+
+ # case 3 max_thread
+ for num in `seq 0 20`
+ do
+ dd if=/dev/zero of=dedup_object_$num bs=4M count=1
+ $RADOS_TOOL -p $POOL put dedup_object_$num ./dedup_object_$num
+ done
+
+ RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 --max-thread 4 --debug | grep result | awk '{print$2}')
+
+ if [ 98566144 -ne $RESULT ];
+ then
+ die "Estimate failed expecting 98566144 result $RESULT"
+ fi
+
+ rm -rf ./dedup_object_1k ./dedup_object_100k ./dedup_object_10m
+ for num in `seq 0 20`
+ do
+ rm -rf ./dedup_object_$num
+ done
+}
+
+function test_dedup_chunk_scrub()
+{
+
+ CHUNK_POOL=dedup_chunk_pool
+ run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8
+
+ echo "hi there" > foo
+
+ echo "hi there" > bar
+
+ echo "there" > foo-chunk
+
+ echo "CHUNK" > bar-chunk
+
+ $CEPH_TOOL osd pool set $POOL fingerprint_algorithm sha1 --yes-i-really-mean-it
+
+ $RADOS_TOOL -p $POOL put foo ./foo
+ $RADOS_TOOL -p $POOL put bar ./bar
+
+ $RADOS_TOOL -p $CHUNK_POOL put bar-chunk ./bar-chunk
+ $RADOS_TOOL -p $CHUNK_POOL put foo-chunk ./foo-chunk
+
+ $RADOS_TOOL -p $POOL set-chunk bar 0 8 --target-pool $CHUNK_POOL bar-chunk 0 --with-reference
+ $RADOS_TOOL -p $POOL set-chunk foo 0 8 --target-pool $CHUNK_POOL foo-chunk 0 --with-reference
+
+ echo "There hi" > test_obj
+ # dirty
+ $RADOS_TOOL -p $POOL put foo ./test_obj
+ # flush
+ $RADOS_TOOL -p $POOL put foo ./test_obj
+ sleep 2
+
+ rados ls -p $CHUNK_POOL
+    CHUNK_OID=$(echo -n "There hi" | sha1sum | awk '{print $1}')
+
+ $DEDUP_TOOL --op add_chunk_ref --pool $POOL --chunk_pool $CHUNK_POOL --object $CHUNK_OID --target_ref bar
+ RESULT=$($DEDUP_TOOL --op get_chunk_ref --pool $POOL --chunk_pool $CHUNK_POOL --object $CHUNK_OID)
+
+ $DEDUP_TOOL --op chunk_scrub --pool $POOL --chunk_pool $CHUNK_POOL
+
+ RESULT=$($DEDUP_TOOL --op get_chunk_ref --pool $POOL --chunk_pool $CHUNK_POOL --object $CHUNK_OID | grep bar)
+ if [ -n "$RESULT" ] ; then
+ $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+ die "Scrub failed expecting bar is removed"
+ fi
+
+ $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it
+
+ rm -rf ./foo ./bar ./foo-chunk ./bar-chunk ./test_obj
+}
+
+test_dedup_ratio_fixed
+test_dedup_chunk_scrub
+
+$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+
+echo "SUCCESS!"
+exit 0
+
+
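
Chunk objects in the chunk pool are named by the fingerprint of their content, which is why the test derives `CHUNK_OID` from `sha1sum`; a sketch of the mapping (pool name and content are illustrative):

    content="There hi"
    oid=$(echo -n "$content" | sha1sum | awk '{print $1}')   # bare hex digest
    rados -p dedup_chunk_pool stat "$oid"                    # chunk object named by its sha1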
diff --git a/qa/workunits/rados/test_envlibrados_for_rocksdb.sh b/qa/workunits/rados/test_envlibrados_for_rocksdb.sh
new file mode 100755
index 00000000..7099dafb
--- /dev/null
+++ b/qa/workunits/rados/test_envlibrados_for_rocksdb.sh
@@ -0,0 +1,94 @@
+#!/usr/bin/env bash
+set -ex
+
+############################################
+# Helper functions
+############################################
+source $(dirname $0)/../ceph-helpers-root.sh
+
+############################################
+# Install required tools
+############################################
+echo "Install required tools"
+
+CURRENT_PATH=`pwd`
+
+############################################
+# Compile&Start RocksDB
+############################################
+# install prerequisites
+# for rocksdb
+case $(distro_id) in
+ ubuntu|debian|devuan)
+ install git g++ libsnappy-dev zlib1g-dev libbz2-dev libradospp-dev cmake
+ ;;
+ centos|fedora|rhel)
+ install git gcc-c++.x86_64 snappy-devel zlib zlib-devel bzip2 bzip2-devel libradospp-devel.x86_64
+ if [ $(distro_id) = "fedora" ]; then
+ install cmake
+ else
+ install_cmake3_on_centos7
+ fi
+ ;;
+ opensuse*|suse|sles)
+ install git gcc-c++ snappy-devel zlib-devel libbz2-devel libradospp-devel
+ ;;
+ *)
+        echo "$(distro_id) is unknown, dependencies will have to be installed manually."
+ ;;
+esac
+
+# # gflags
+# sudo yum install gflags-devel
+#
+# wget https://github.com/schuhschuh/gflags/archive/master.zip
+# unzip master.zip
+# cd gflags-master
+# mkdir build && cd build
+# export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
+# make && make install
+
+# # snappy-devel
+
+
+echo "Compile rocksdb"
+if [ -e rocksdb ]; then
+ rm -fr rocksdb
+fi
+
+pushd $(dirname $0)/../../../
+git submodule update --init src/rocksdb
+popd
+git clone $(dirname $0)/../../../src/rocksdb rocksdb
+
+# compile code
+cd rocksdb
+if type cmake3 > /dev/null 2>&1 ; then
+ CMAKE=cmake3
+else
+ CMAKE=cmake
+fi
+mkdir build && cd build && ${CMAKE} -DWITH_LIBRADOS=ON -DWITH_SNAPPY=ON -DWITH_GFLAGS=OFF -DFAIL_ON_WARNINGS=OFF ..
+make rocksdb_env_librados_test -j8
+
+echo "Copy ceph.conf"
+# prepare ceph.conf
+mkdir -p ../ceph/src/
+if [ -f "/etc/ceph/ceph.conf" ]; then
+ cp /etc/ceph/ceph.conf ../ceph/src/
+elif [ -f "/etc/ceph/ceph/ceph.conf" ]; then
+ cp /etc/ceph/ceph/ceph.conf ../ceph/src/
+else
+    echo "neither /etc/ceph/ceph.conf nor /etc/ceph/ceph/ceph.conf exists"
+fi
+
+echo "Run EnvLibrados test"
+# run test
+if [ -f "../ceph/src/ceph.conf" ]
+ then
+ cp env_librados_test ~/cephtest/archive
+ ./env_librados_test
+else
+ echo "../ceph/src/ceph.conf doesn't exist"
+fi
+cd ${CURRENT_PATH}
diff --git a/qa/workunits/rados/test_hang.sh b/qa/workunits/rados/test_hang.sh
new file mode 100755
index 00000000..724e0bb8
--- /dev/null
+++ b/qa/workunits/rados/test_hang.sh
@@ -0,0 +1,8 @@
+#!/bin/sh -ex
+
+# Hang forever for manual testing using the thrasher
+while true
+do
+ sleep 300
+done
+exit 0
diff --git a/qa/workunits/rados/test_health_warnings.sh b/qa/workunits/rados/test_health_warnings.sh
new file mode 100755
index 00000000..d393e5c6
--- /dev/null
+++ b/qa/workunits/rados/test_health_warnings.sh
@@ -0,0 +1,76 @@
+#!/usr/bin/env bash
+
+set -uex
+
+# number of osds = 10
+crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0
+ceph osd setcrushmap -i crushmap
+ceph osd tree
+ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1
+ceph osd set noout
+
+wait_for_healthy() {
+ while ceph health | grep down
+ do
+ sleep 1
+ done
+}
+
+test_mark_two_osds_same_host_down() {
+ ceph osd set noup
+ ceph osd down osd.0 osd.1
+ ceph health detail
+ ceph health | grep "1 host"
+ ceph health | grep "2 osds"
+ ceph health detail | grep "osd.0"
+ ceph health detail | grep "osd.1"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_two_osds_same_rack_down() {
+ ceph osd set noup
+ ceph osd down osd.8 osd.9
+ ceph health detail
+ ceph health | grep "1 host"
+ ceph health | grep "1 rack"
+ ceph health | grep "1 row"
+ ceph health | grep "2 osds"
+ ceph health detail | grep "osd.8"
+ ceph health detail | grep "osd.9"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_all_but_last_osds_down() {
+ ceph osd set noup
+ ceph osd down $(ceph osd ls | sed \$d)
+ ceph health detail
+ ceph health | grep "1 row"
+ ceph health | grep "2 racks"
+ ceph health | grep "4 hosts"
+ ceph health | grep "9 osds"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_two_osds_same_host_down_with_classes() {
+ ceph osd set noup
+ ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8
+ ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9
+ ceph osd down osd.0 osd.1
+ ceph health detail
+ ceph health | grep "1 host"
+ ceph health | grep "2 osds"
+ ceph health detail | grep "osd.0"
+ ceph health detail | grep "osd.1"
+ ceph osd unset noup
+ wait_for_healthy
+}
+
+test_mark_two_osds_same_host_down
+test_mark_two_osds_same_rack_down
+test_mark_all_but_last_osds_down
+test_mark_two_osds_same_host_down_with_classes
+
+exit 0
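
As written, `wait_for_healthy` can spin forever if an OSD never comes back up; a hedged variant with the same grep condition and a retry cap:

    wait_for_healthy() {
        local tries=0
        while ceph health | grep down; do
            sleep 1
            tries=$((tries + 1))
            [ "$tries" -ge 300 ] && return 1   # give up after ~5 minutes
        done
    }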
diff --git a/qa/workunits/rados/test_large_omap_detection.py b/qa/workunits/rados/test_large_omap_detection.py
new file mode 100755
index 00000000..c6cf195d
--- /dev/null
+++ b/qa/workunits/rados/test_large_omap_detection.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- mode:python -*-
+# vim: ts=4 sw=4 smarttab expandtab
+#
+# Copyright (C) 2017 Red Hat <contact@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Library Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library Public License for more details.
+#
+
+import json
+import rados
+import shlex
+import subprocess
+import time
+
+def cleanup(cluster):
+ cluster.delete_pool('large-omap-test-pool')
+ cluster.shutdown()
+
+def init():
+ # For local testing
+ #cluster = rados.Rados(conffile='./ceph.conf')
+ cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
+ cluster.connect()
+ print("\nCluster ID: " + cluster.get_fsid())
+ cluster.create_pool('large-omap-test-pool')
+ ioctx = cluster.open_ioctx('large-omap-test-pool')
+ ioctx.write_full('large-omap-test-object1', "Lorem ipsum")
+ op = ioctx.create_write_op()
+
+ keys = []
+ values = []
+ for x in range(20001):
+ keys.append(str(x))
+ values.append("X")
+
+ ioctx.set_omap(op, tuple(keys), tuple(values))
+ ioctx.operate_write_op(op, 'large-omap-test-object1', 0)
+ ioctx.release_write_op(op)
+
+ ioctx.write_full('large-omap-test-object2', "Lorem ipsum dolor")
+ op = ioctx.create_write_op()
+
+ buffer = ("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do "
+ "eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut "
+ "enim ad minim veniam, quis nostrud exercitation ullamco laboris "
+ "nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in "
+ "reprehenderit in voluptate velit esse cillum dolore eu fugiat "
+ "nulla pariatur. Excepteur sint occaecat cupidatat non proident, "
+ "sunt in culpa qui officia deserunt mollit anim id est laborum.")
+
+ keys = []
+ values = []
+ for x in range(20000):
+ keys.append(str(x))
+ values.append(buffer)
+
+ ioctx.set_omap(op, tuple(keys), tuple(values))
+ ioctx.operate_write_op(op, 'large-omap-test-object2', 0)
+ ioctx.release_write_op(op)
+ ioctx.close()
+ return cluster
+
+def get_deep_scrub_timestamp(pgid):
+ cmd = ['ceph', 'pg', 'dump', '--format=json-pretty']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ try:
+ pgstats = json.loads(out)['pg_map']['pg_stats']
+ except KeyError:
+ pgstats = json.loads(out)['pg_stats']
+ for stat in pgstats:
+ if stat['pgid'] == pgid:
+ return stat['last_deep_scrub_stamp']
+
+def wait_for_scrub():
+    osds = set()
+    pgs = dict()
+ cmd = ['ceph', 'osd', 'map', 'large-omap-test-pool',
+ 'large-omap-test-object1', '--format=json-pretty']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ osds.add(json.loads(out)['acting_primary'])
+ pgs[json.loads(out)['pgid']] = get_deep_scrub_timestamp(json.loads(out)['pgid'])
+ cmd = ['ceph', 'osd', 'map', 'large-omap-test-pool',
+ 'large-omap-test-object2', '--format=json-pretty']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ osds.add(json.loads(out)['acting_primary'])
+ pgs[json.loads(out)['pgid']] = get_deep_scrub_timestamp(json.loads(out)['pgid'])
+
+ for pg in pgs:
+ command = "ceph pg deep-scrub " + str(pg)
+ subprocess.check_call(shlex.split(command))
+
+ for pg in pgs:
+ RETRIES = 0
+ while RETRIES < 60 and pgs[pg] == get_deep_scrub_timestamp(pg):
+ time.sleep(10)
+ RETRIES += 1
+
+def check_health_output():
+ RETRIES = 0
+ result = 0
+ while RETRIES < 6 and result != 2:
+ result = 0
+ RETRIES += 1
+ output = subprocess.check_output(["ceph", "health", "detail"])
+ for line in output.splitlines():
+ result += int(line.find('2 large omap objects') != -1)
+ time.sleep(10)
+
+ if result != 2:
+ print("Error, got invalid output:")
+ print(output)
+ raise Exception
+
+def main():
+ cluster = init()
+ wait_for_scrub()
+ check_health_output()
+
+ cleanup(cluster)
+
+if __name__ == '__main__':
+ main()
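
The Python above drives the same machinery the CLI exposes directly; the manual equivalent, sketched in shell with a hypothetical pgid:

    ceph pg deep-scrub 2.0                                    # pgid holding the large object
    ceph pg dump --format=json-pretty | grep last_deep_scrub_stamp
    ceph health detail | grep 'large omap objects'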
diff --git a/qa/workunits/rados/test_librados_build.sh b/qa/workunits/rados/test_librados_build.sh
new file mode 100755
index 00000000..0bca5050
--- /dev/null
+++ b/qa/workunits/rados/test_librados_build.sh
@@ -0,0 +1,74 @@
+#!/bin/bash -ex
+#
+# Compile and run a librados application outside of the ceph build system, so
+# that we can be sure librados.h[pp] is still usable and hasn't accidentally
+# started depending on internal headers.
+#
+# The script assumes all dependencies - e.g. curl, make, gcc, librados headers,
+# libradosstriper headers, boost headers, etc. - are already installed.
+#
+
+source $(dirname $0)/../ceph-helpers-root.sh
+
+trap cleanup EXIT
+
+SOURCES="hello_radosstriper.cc
+hello_world_c.c
+hello_world.cc
+Makefile
+"
+BINARIES_TO_RUN="hello_world_c
+hello_world_cpp
+"
+BINARIES="${BINARIES_TO_RUN}hello_radosstriper_cpp
+"
+DL_PREFIX="http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=nautilus;f=examples/librados/"
+#DL_PREFIX="https://raw.githubusercontent.com/ceph/ceph/nautilus/examples/librados/"
+DESTDIR=$(pwd)
+
+function cleanup () {
+ for f in $BINARIES$SOURCES ; do
+ rm -f "${DESTDIR}/$f"
+ done
+}
+
+function get_sources () {
+ for s in $SOURCES ; do
+ curl --progress-bar --output $s ${DL_PREFIX}$s
+ done
+}
+
+function check_sources () {
+ for s in $SOURCES ; do
+ test -f $s
+ done
+}
+
+function check_binaries () {
+ for b in $BINARIES ; do
+ file $b
+ test -f $b
+ done
+}
+
+function run_binaries () {
+ for b in $BINARIES_TO_RUN ; do
+ ./$b -c /etc/ceph/ceph.conf
+ done
+}
+
+pushd $DESTDIR
+case $(distro_id) in
+ centos|fedora|rhel|opensuse*|suse|sles)
+ install gcc-c++ make libradospp-devel librados-devel;;
+ ubuntu|debian|devuan)
+ install g++ make libradospp-dev librados-dev;;
+ *)
+        echo "$(distro_id) is unknown, dependencies will have to be installed manually."
+esac
+get_sources
+check_sources
+make all-system
+check_binaries
+run_binaries
+popd
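
The downloaded Makefile drives the build, but the underlying step is a plain link against librados; a sketch of doing it by hand (flags assumed, not taken from the Makefile):

    gcc -o hello_world_c hello_world_c.c -lrados
    g++ -std=c++11 -o hello_world_cpp hello_world.cc -lrados
    ./hello_world_cpp -c /etc/ceph/ceph.conf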
diff --git a/qa/workunits/rados/test_pool_access.sh b/qa/workunits/rados/test_pool_access.sh
new file mode 100755
index 00000000..2a7077a4
--- /dev/null
+++ b/qa/workunits/rados/test_pool_access.sh
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+set -ex
+
+KEYRING=$(mktemp)
+trap cleanup EXIT ERR HUP INT QUIT
+
+cleanup() {
+ (ceph auth del client.mon_read || true) >/dev/null 2>&1
+ (ceph auth del client.mon_write || true) >/dev/null 2>&1
+
+ rm -f $KEYRING
+}
+
+expect_false()
+{
+ set -x
+ if "$@"; then return 1; else return 0; fi
+}
+
+create_pool_op() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.create_pool("${POOL}")
+EOF
+}
+
+delete_pool_op() {
+ ID=$1
+ POOL=$2
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.delete_pool("${POOL}")
+EOF
+}
+
+create_pool_snap_op() {
+ ID=$1
+ POOL=$2
+ SNAP=$3
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.create_snap("${SNAP}")
+EOF
+}
+
+remove_pool_snap_op() {
+ ID=$1
+ POOL=$2
+ SNAP=$3
+
+ cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.remove_snap("${SNAP}")
+EOF
+}
+
+test_pool_op()
+{
+ ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING
+ ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
+
+ expect_false create_pool_op mon_read pool1
+ create_pool_op mon_write pool1
+
+ expect_false create_pool_snap_op mon_read pool1 snap1
+ create_pool_snap_op mon_write pool1 snap1
+
+ expect_false remove_pool_snap_op mon_read pool1 snap1
+ remove_pool_snap_op mon_write pool1 snap1
+
+ expect_false delete_pool_op mon_read pool1
+ delete_pool_op mon_write pool1
+}
+
+key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
+rados --id poolaccess1 --key $key -p rbd ls
+
+key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
+expect_false rados --id poolaccess2 --key $key -p rbd ls
+
+key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
+expect_false rados --id poolaccess3 --key $key -p rbd ls
+
+test_pool_op
+
+echo OK
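
The cap pattern above generalizes: mon caps gate cluster-level operations (pool create/delete, snaps) while osd caps scope data access per pool; a sketch with hypothetical client names:

    ceph auth get-or-create client.reader mon 'allow r' osd 'allow r pool=rbd'
    ceph auth get-or-create client.writer mon 'allow r' osd 'allow rw pool=rbd'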
diff --git a/qa/workunits/rados/test_pool_quota.sh b/qa/workunits/rados/test_pool_quota.sh
new file mode 100755
index 00000000..0eacefc6
--- /dev/null
+++ b/qa/workunits/rados/test_pool_quota.sh
@@ -0,0 +1,68 @@
+#!/bin/sh -ex
+
+p=`uuidgen`
+
+# objects
+ceph osd pool create $p 12
+ceph osd pool set-quota $p max_objects 10
+ceph osd pool application enable $p rados
+
+for f in `seq 1 10` ; do
+ rados -p $p put obj$f /etc/passwd
+done
+
+sleep 30
+
+rados -p $p put onemore /etc/passwd &
+pid=$!
+
+ceph osd pool set-quota $p max_objects 100
+wait $pid
+[ $? -ne 0 ] && exit 1 || true
+
+rados -p $p put twomore /etc/passwd
+
+# bytes
+ceph osd pool set-quota $p max_bytes 100
+sleep 30
+
+rados -p $p put two /etc/passwd &
+pid=$!
+
+ceph osd pool set-quota $p max_bytes 0
+ceph osd pool set-quota $p max_objects 0
+wait $pid
+[ $? -ne 0 ] && exit 1 || true
+
+rados -p $p put three /etc/passwd
+
+
+#one pool being full does not block a different pool
+
+pp=`uuidgen`
+
+ceph osd pool create $pp 12
+ceph osd pool application enable $pp rados
+
+# set objects quota
+ceph osd pool set-quota $pp max_objects 10
+sleep 30
+
+for f in `seq 1 10` ; do
+ rados -p $pp put obj$f /etc/passwd
+done
+
+sleep 30
+
+rados -p $p put threemore /etc/passwd
+
+ceph osd pool set-quota $p max_bytes 0
+ceph osd pool set-quota $p max_objects 0
+
+sleep 30
+# done
+ceph osd pool delete $p $p --yes-i-really-really-mean-it
+ceph osd pool delete $pp $pp --yes-i-really-really-mean-it
+
+echo OK
+
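
Quota state can also be inspected directly instead of being inferred from blocked writes; a sketch, assuming the pool still exists:

    ceph osd pool get-quota $p                  # shows max objects / max bytes
    ceph osd pool set-quota $p max_objects 0    # 0 clears the quota, as above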
diff --git a/qa/workunits/rados/test_python.sh b/qa/workunits/rados/test_python.sh
new file mode 100755
index 00000000..80369c8d
--- /dev/null
+++ b/qa/workunits/rados/test_python.sh
@@ -0,0 +1,4 @@
+#!/bin/sh -ex
+
+${PYTHON:-python} -m nose -v $(dirname $0)/../../../src/test/pybind/test_rados.py
+exit 0
diff --git a/qa/workunits/rados/test_rados_timeouts.sh b/qa/workunits/rados/test_rados_timeouts.sh
new file mode 100755
index 00000000..327c7ab3
--- /dev/null
+++ b/qa/workunits/rados/test_rados_timeouts.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+set -x
+
+delay_mon() {
+ MSGTYPE=$1
+ shift
+ $@ --rados-mon-op-timeout 1 --ms-inject-delay-type mon --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
+ if [ $? -eq 0 ]; then
+ exit 1
+ fi
+}
+
+delay_osd() {
+ MSGTYPE=$1
+ shift
+ $@ --rados-osd-op-timeout 1 --ms-inject-delay-type osd --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE
+ if [ $? -eq 0 ]; then
+ exit 2
+ fi
+}
+
+# pool ops
+delay_mon omap rados lspools
+delay_mon poolopreply ceph osd pool create test 8
+delay_mon poolopreply rados mksnap -p test snap
+delay_mon poolopreply ceph osd pool rm test test --yes-i-really-really-mean-it
+
+# other mon ops
+delay_mon getpoolstats rados df
+delay_mon mon_command ceph df
+delay_mon omap ceph osd dump
+delay_mon omap ceph -s
+
+# osd ops
+delay_osd osd_op_reply rados -p data put ls /bin/ls
+delay_osd osd_op_reply rados -p data get ls - >/dev/null
+delay_osd osd_op_reply rados -p data ls
+delay_osd command_reply ceph tell osd.0 bench 1 1
+
+# rbd commands, using more kinds of osd ops
+rbd create -s 1 test
+delay_osd osd_op_reply rbd watch test
+delay_osd osd_op_reply rbd info test
+delay_osd osd_op_reply rbd snap create test@snap
+delay_osd osd_op_reply rbd import /bin/ls ls
+rbd rm test
+
+echo OK
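
Each `delay_*` helper asserts that the client aborts with an error once the injected delay exceeds the 1-second op timeout; one invocation expanded for clarity (flags exactly as in the helpers above):

    rados lspools --rados-mon-op-timeout 1 \
        --ms-inject-delay-type mon --ms-inject-delay-max 10000000 \
        --ms-inject-delay-probability 1 --ms-inject-delay-msg-type omap \
        && echo "BUG: should have timed out"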
diff --git a/qa/workunits/rados/test_rados_tool.sh b/qa/workunits/rados/test_rados_tool.sh
new file mode 100755
index 00000000..5325743f
--- /dev/null
+++ b/qa/workunits/rados/test_rados_tool.sh
@@ -0,0 +1,924 @@
+#!/usr/bin/env bash
+
+set -x
+
+die() {
+ echo "$@"
+ exit 1
+}
+
+usage() {
+ cat <<EOF
+test_rados_tool.sh: tests rados_tool
+-c: RADOS configuration file to use [optional]
+-k: keep temp files
+-h: this help message
+-p: set temporary pool to use [optional]
+EOF
+}
+
+do_run() {
+ if [ "$1" == "--tee" ]; then
+ shift
+ tee_out="$1"
+ shift
+ "$@" | tee $tee_out
+ else
+ "$@"
+ fi
+}
+
+run_expect_fail() {
+ echo "RUN_EXPECT_FAIL: " "$@"
+ do_run "$@"
+ [ $? -eq 0 ] && die "expected failure, but got success! cmd: $@"
+}
+
+run_expect_succ() {
+ echo "RUN_EXPECT_SUCC: " "$@"
+ do_run "$@"
+ [ $? -ne 0 ] && die "expected success, but got failure! cmd: $@"
+}
+
+run_expect_nosignal() {
+ echo "RUN_EXPECT_NOSIGNAL: " "$@"
+ do_run "$@"
+ [ $? -ge 128 ] && die "expected success or fail, but got signal! cmd: $@"
+}
+
+run() {
+ echo "RUN: " $@
+ do_run "$@"
+}
+
+if [ -n "$CEPH_BIN" ] ; then
+ # CMake env
+ RADOS_TOOL="$CEPH_BIN/rados"
+ CEPH_TOOL="$CEPH_BIN/ceph"
+else
+ # executables should be installed by the QA env
+ RADOS_TOOL=$(which rados)
+ CEPH_TOOL=$(which ceph)
+fi
+
+KEEP_TEMP_FILES=0
+POOL=trs_pool
+POOL_CP_TARGET=trs_pool.2
+POOL_EC=trs_pool_ec
+
+[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test"
+[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test"
+
+while getopts "c:hkp:" flag; do
+ case $flag in
+ c) RADOS_TOOL="$RADOS_TOOL -c $OPTARG";;
+ k) KEEP_TEMP_FILES=1;;
+ h) usage; exit 0;;
+ p) POOL=$OPTARG;;
+ *) echo; usage; exit 1;;
+ esac
+done
+
+TDIR=`mktemp -d -t test_rados_tool.XXXXXXXXXX` || die "mktemp failed"
+[ $KEEP_TEMP_FILES -eq 0 ] && trap "rm -rf ${TDIR}; exit" INT TERM EXIT
+
+# ensure rados doesn't segfault without --pool
+run_expect_nosignal "$RADOS_TOOL" --snap "asdf" ls
+run_expect_nosignal "$RADOS_TOOL" --snapid "0" ls
+run_expect_nosignal "$RADOS_TOOL" --object_locator "asdf" ls
+run_expect_nosignal "$RADOS_TOOL" --namespace "asdf" ls
+
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+run_expect_succ "$CEPH_TOOL" osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_EC" 100 100 erasure myprofile
+
+
+# expb happens to be the empty export for legacy reasons
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expb"
+
+# expa has objects foo, foo2 and bar
+run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo /etc/fstab
+run_expect_succ "$RADOS_TOOL" -p "$POOL" put foo2 /etc/fstab
+run_expect_succ "$RADOS_TOOL" -p "$POOL" put bar /etc/fstab
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expa"
+
+# expc has foo and foo2 with some attributes and omaps set
+run_expect_succ "$RADOS_TOOL" -p "$POOL" rm bar
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "toothbrush"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothpaste" "crest"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval foo "rados.floss" "myfloss"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo2 "rados.toothbrush" "green"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader foo2 "foo2.header"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/expc"
+
+# make sure that --create works
+run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
+
+# make sure that lack of --create fails
+run_expect_succ "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/expa"
+
+run_expect_succ "$RADOS_TOOL" -p "$POOL" --create import "$TDIR/expa"
+
+# inaccessible import src should fail
+run_expect_fail "$RADOS_TOOL" -p "$POOL" import "$TDIR/dir_nonexistent"
+
+# export an empty pool to test purge
+run_expect_succ "$RADOS_TOOL" purge "$POOL" --yes-i-really-really-mean-it
+run_expect_succ "$RADOS_TOOL" -p "$POOL" export "$TDIR/empty"
+cmp -s "$TDIR/expb" "$TDIR/empty" \
+ || die "failed to export the same stuff we imported!"
+rm -f "$TDIR/empty"
+
+# import some stuff with extended attributes on it
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ ${VAL} = "toothbrush" ] || die "Invalid attribute after import"
+
+# the second time, the xattrs should match, so there should be nothing to do.
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
+
+# Now try with --no-overwrite option after changing an attribute
+run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr foo "rados.toothbrush" "dentist"
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import --no-overwrite "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ "${VAL}" = "dentist" ] || die "Invalid attribute after second import"
+
+# now force it to copy everything
+run_expect_succ "$RADOS_TOOL" -p "$POOL" import "$TDIR/expc"
+VAL=`"$RADOS_TOOL" -p "$POOL" getxattr foo "rados.toothbrush"`
+[ "${VAL}" = "toothbrush" ] || die "Invalid attribute after second import"
+
+# test copy pool
+run "$CEPH_TOOL" osd pool rm "$POOL" "$POOL" --yes-i-really-really-mean-it
+run "$CEPH_TOOL" osd pool rm "$POOL_CP_TARGET" "$POOL_CP_TARGET" --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8
+run_expect_succ "$CEPH_TOOL" osd pool create "$POOL_CP_TARGET" 8
+
+# create src files
+mkdir -p "$TDIR/dir_cp_src"
+for i in `seq 1 5`; do
+ fname="$TDIR/dir_cp_src/f.$i"
+ objname="f.$i"
+ dd if=/dev/urandom of="$fname" bs=$((1024*1024)) count=$i
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" put $objname "$fname"
+
+# a few random attrs
+ for j in `seq 1 4`; do
+ rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" setxattr $objname attr.$j "$rand_str"
+ run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL" getxattr $objname attr.$j
+ done
+
+ rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapheader $objname "$rand_str"
+ run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL" getomapheader $objname
+
+# a few random omap keys
+ for j in `seq 1 4`; do
+ rand_str=`dd if=/dev/urandom bs=4 count=1 | hexdump -x`
+ run_expect_succ "$RADOS_TOOL" -p "$POOL" setomapval $objname key.$j "$rand_str"
+ done
+ run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL" listomapvals $objname
+done
+
+run_expect_succ "$RADOS_TOOL" cppool "$POOL" "$POOL_CP_TARGET"
+
+mkdir -p "$TDIR/dir_cp_dst"
+for i in `seq 1 5`; do
+ fname="$TDIR/dir_cp_dst/f.$i"
+ objname="f.$i"
+ run_expect_succ "$RADOS_TOOL" -p "$POOL_CP_TARGET" get $objname "$fname"
+
+# a few random attrs
+ for j in `seq 1 4`; do
+ run_expect_succ --tee "$fname.attr.$j" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getxattr $objname attr.$j
+ done
+
+ run_expect_succ --tee "$fname.omap.header" "$RADOS_TOOL" -p "$POOL_CP_TARGET" getomapheader $objname
+ run_expect_succ --tee "$fname.omap.vals" "$RADOS_TOOL" -p "$POOL_CP_TARGET" listomapvals $objname
+done
+
+diff -q -r "$TDIR/dir_cp_src" "$TDIR/dir_cp_dst" \
+ || die "copy pool validation failed!"
+
+for opt in \
+ block-size \
+ concurrent-ios \
+ min-object-size \
+ max-object-size \
+ min-op-len \
+ max-op-len \
+ max-ops \
+ max-backlog \
+ target-throughput \
+ read-percent \
+ num-objects \
+ run-length \
+ ; do
+ run_expect_succ "$RADOS_TOOL" --$opt 4 df
+ run_expect_fail "$RADOS_TOOL" --$opt 4k df
+done
+
+run_expect_succ "$RADOS_TOOL" lock list f.1 --lock-duration 4 --pool "$POOL"
+echo # previous command doesn't output an end of line: issue #9735
+run_expect_fail "$RADOS_TOOL" lock list f.1 --lock-duration 4k --pool "$POOL"
+
+run_expect_succ "$RADOS_TOOL" mksnap snap1 --pool "$POOL"
+snapid=$("$RADOS_TOOL" lssnap --pool "$POOL" | grep snap1 | cut -f1)
+[ $? -ne 0 ] && die "expected success, but got failure! cmd: \"$RADOS_TOOL\" lssnap --pool \"$POOL\" | grep snap1 | cut -f1"
+run_expect_succ "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"
+run_expect_fail "$RADOS_TOOL" ls --pool "$POOL" --snapid="$snapid"k
+
+run_expect_succ "$RADOS_TOOL" truncate f.1 0 --pool "$POOL"
+run_expect_fail "$RADOS_TOOL" truncate f.1 0k --pool "$POOL"
+
+run "$CEPH_TOOL" osd pool rm delete_me_mkpool_test delete_me_mkpool_test --yes-i-really-really-mean-it
+run_expect_succ "$CEPH_TOOL" osd pool create delete_me_mkpool_test 1
+
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1k write
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 write --format json --output "$TDIR/bench.json"
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 1 write --output "$TDIR/bench.json"
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --format json --no-cleanup
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand --format json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 rand -f json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq --format json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 1 seq -f json
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-object
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-object
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-omap --write-object
+run_expect_succ "$RADOS_TOOL" --pool "$POOL" bench 5 write --write-xattr --write-omap --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-omap --write-object
+run_expect_fail "$RADOS_TOOL" --pool "$POOL" bench 5 read --write-xattr --write-omap --write-object
+
+for i in $("$RADOS_TOOL" --pool "$POOL" ls | grep "benchmark_data"); do
+ "$RADOS_TOOL" --pool "$POOL" truncate $i 0
+done
+
+run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 rand
+run_expect_nosignal "$RADOS_TOOL" --pool "$POOL" bench 1 seq
+
+set -e
+
+OBJ=test_rados_obj
+
+expect_false()
+{
+ if "$@"; then return 1; else return 0; fi
+}
+
+cleanup() {
+ $RADOS_TOOL -p $POOL rm $OBJ > /dev/null 2>&1 || true
+ $RADOS_TOOL -p $POOL_EC rm $OBJ > /dev/null 2>&1 || true
+}
+
+test_omap() {
+ cleanup
+ for i in $(seq 1 1 10)
+ do
+ if [ $(($i % 2)) -eq 0 ]; then
+ $RADOS_TOOL -p $POOL setomapval $OBJ $i $i
+ else
+ echo -n "$i" | $RADOS_TOOL -p $POOL setomapval $OBJ $i
+ fi
+ $RADOS_TOOL -p $POOL getomapval $OBJ $i | grep -q "|$i|\$"
+ done
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 10
+ for i in $(seq 1 1 5)
+ do
+ $RADOS_TOOL -p $POOL rmomapkey $OBJ $i
+ done
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 5
+ $RADOS_TOOL -p $POOL clearomap $OBJ
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | wc -l | grep 0
+ cleanup
+
+ for i in $(seq 1 1 10)
+ do
+ dd if=/dev/urandom bs=128 count=1 > $TDIR/omap_key
+ if [ $(($i % 2)) -eq 0 ]; then
+ $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ $i
+ else
+ echo -n "$i" | $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ
+ fi
+ $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key getomapval $OBJ | grep -q "|$i|\$"
+ $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key rmomapkey $OBJ
+ $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 0
+ done
+ cleanup
+}
+
+test_xattr() {
+ cleanup
+ $RADOS_TOOL -p $POOL put $OBJ /etc/passwd
+ V1=`mktemp fooattrXXXXXXX`
+ V2=`mktemp fooattrXXXXXXX`
+ echo -n fooval > $V1
+ expect_false $RADOS_TOOL -p $POOL setxattr $OBJ 2>/dev/null
+ expect_false $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval extraarg 2>/dev/null
+ $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval
+ $RADOS_TOOL -p $POOL getxattr $OBJ foo > $V2
+ cmp $V1 $V2
+ cat $V1 | $RADOS_TOOL -p $POOL setxattr $OBJ bar
+ $RADOS_TOOL -p $POOL getxattr $OBJ bar > $V2
+ cmp $V1 $V2
+ $RADOS_TOOL -p $POOL listxattr $OBJ > $V1
+ grep -q foo $V1
+ grep -q bar $V1
+ [ `cat $V1 | wc -l` -eq 2 ]
+ rm $V1 $V2
+ cleanup
+}
+test_rmobj() {
+ p=`uuidgen`
+ $CEPH_TOOL osd pool create $p 1
+ $CEPH_TOOL osd pool set-quota $p max_objects 1
+ V1=`mktemp fooattrXXXXXXX`
+ $RADOS_TOOL put $OBJ $V1 -p $p
+ while ! $CEPH_TOOL osd dump | grep 'full_quota max_objects'
+ do
+ sleep 2
+ done
+ $RADOS_TOOL -p $p rm $OBJ --force-full
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
+ rm $V1
+}
+
+test_ls() {
+ echo "Testing rados ls command"
+ p=`uuidgen`
+ $CEPH_TOOL osd pool create $p 1
+ NS=10
+ OBJS=20
+ # Include default namespace (0) in the total
+ TOTAL=$(expr $OBJS \* $(expr $NS + 1))
+
+ for nsnum in `seq 0 $NS`
+ do
+ for onum in `seq 1 $OBJS`
+ do
+ if [ "$nsnum" = "0" ];
+ then
+ "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
+ else
+ "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
+ fi
+ done
+ done
+ CHECK=$("$RADOS_TOOL" -p $p ls 2> /dev/null | wc -l)
+ if [ "$OBJS" -ne "$CHECK" ];
+ then
+ die "Created $OBJS objects in default namespace but saw $CHECK"
+ fi
+ TESTNS=NS${NS}
+ CHECK=$("$RADOS_TOOL" -p $p -N $TESTNS ls 2> /dev/null | wc -l)
+ if [ "$OBJS" -ne "$CHECK" ];
+ then
+ die "Created $OBJS objects in $TESTNS namespace but saw $CHECK"
+ fi
+ CHECK=$("$RADOS_TOOL" -p $p --all ls 2> /dev/null | wc -l)
+ if [ "$TOTAL" -ne "$CHECK" ];
+ then
+ die "Created $TOTAL objects but saw $CHECK"
+ fi
+
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
+}
+
+test_cleanup() {
+ echo "Testing rados cleanup command"
+ p=`uuidgen`
+ $CEPH_TOOL osd pool create $p 1
+ NS=5
+ OBJS=4
+ # Include default namespace (0) in the total
+ TOTAL=$(expr $OBJS \* $(expr $NS + 1))
+
+ for nsnum in `seq 0 $NS`
+ do
+ for onum in `seq 1 $OBJS`
+ do
+ if [ "$nsnum" = "0" ];
+ then
+ "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null
+ else
+ "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null
+ fi
+ done
+ done
+
+ $RADOS_TOOL -p $p --all ls > $TDIR/before.ls.out 2> /dev/null
+
+ $RADOS_TOOL -p $p bench 3 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS1 bench 3 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS2 bench 3 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS3 bench 3 write --no-cleanup 2> /dev/null
+ # Leave dangling objects without a benchmark_last_metadata in NS4
+ expect_false timeout 3 $RADOS_TOOL -p $p -N NS4 bench 30 write --no-cleanup 2> /dev/null
+ $RADOS_TOOL -p $p -N NS5 bench 3 write --no-cleanup 2> /dev/null
+
+ $RADOS_TOOL -p $p -N NS3 cleanup 2> /dev/null
+ #echo "Check NS3 after specific cleanup"
+ CHECK=$($RADOS_TOOL -p $p -N NS3 ls | wc -l)
+ if [ "$OBJS" -ne "$CHECK" ] ;
+ then
+ die "Expected $OBJS objects in NS3 but saw $CHECK"
+ fi
+
+ #echo "Try to cleanup all"
+ $RADOS_TOOL -p $p --all cleanup
+ #echo "Check all namespaces"
+ $RADOS_TOOL -p $p --all ls > $TDIR/after.ls.out 2> /dev/null
+ CHECK=$(cat $TDIR/after.ls.out | wc -l)
+ if [ "$TOTAL" -ne "$CHECK" ];
+ then
+ die "Expected $TOTAL objects but saw $CHECK"
+ fi
+ if ! diff $TDIR/before.ls.out $TDIR/after.ls.out
+ then
+ die "Different objects found after cleanup"
+ fi
+
+ set +e
+ run_expect_fail $RADOS_TOOL -p $p cleanup --prefix illegal_prefix
+ run_expect_succ $RADOS_TOOL -p $p cleanup --prefix benchmark_data_otherhost
+ set -e
+
+ $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it
+}
+
+function test_append()
+{
+ cleanup
+
+ # create object
+ touch ./rados_append_null
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_null
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_0_out
+ cmp ./rados_append_null ./rados_append_0_out
+
+ # append 4k, total size 4k
+ dd if=/dev/zero of=./rados_append_4k bs=4k count=1
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
+ cmp ./rados_append_4k ./rados_append_4k_out
+
+ # append 4k, total size 8k
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out
+ read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
+ if [ 8192 -ne $read_size ];
+ then
+ die "Append failed expecting 8192 read $read_size"
+ fi
+
+ # append 10M, total size 10493952
+ dd if=/dev/zero of=./rados_append_10m bs=10M count=1
+ $RADOS_TOOL -p $POOL append $OBJ ./rados_append_10m
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_append_10m_out
+ read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
+ if [ 10493952 -ne $read_size ];
+ then
+ die "Append failed expecting 10493952 read $read_size"
+ fi
+
+ # cleanup
+ cleanup
+
+ # create object
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_null
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_0_out
+ cmp rados_append_null rados_append_0_out
+
+ # append 4k, total size 4k
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
+ cmp rados_append_4k rados_append_4k_out
+
+ # append 4k, total size 8k
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out
+ read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'`
+ if [ 8192 -ne $read_size ];
+ then
+ die "Append failed expecting 8192 read $read_size"
+ fi
+
+ # append 10M, total size 10493952
+ $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_10m
+ $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_10m_out
+ read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'`
+ if [ 10493952 -ne $read_size ];
+ then
+ die "Append failed expecting 10493952 read $read_size"
+ fi
+
+ cleanup
+ rm -rf ./rados_append_null ./rados_append_0_out
+ rm -rf ./rados_append_4k ./rados_append_4k_out ./rados_append_10m ./rados_append_10m_out
+}
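+
+ # The size checks above parse `ls -l`; a more direct alternative (a sketch,
+ # GNU coreutils assumed; defined here only for illustration and unused):
+ file_size() { stat -c %s "$1"; }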
+
+function test_put()
+{
+ # rados put test:
+ cleanup
+
+ # create file in local fs
+ dd if=/dev/urandom of=rados_object_10k bs=1K count=10
+
+ # test put command
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_object_10k_out
+ cmp ./rados_object_10k ./rados_object_10k_out
+ cleanup
+
+ # test put command with offset 0
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 0
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_0_out
+ cmp ./rados_object_10k ./rados_object_offset_0_out
+ cleanup
+
+ # test put command with offset 1000
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 1000
+ $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_1000_out
+ cmp ./rados_object_10k ./rados_object_offset_1000_out 0 1000
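+ # (cmp's trailing operands are SKIP1/SKIP2: compare the source from byte 0
+ # with the fetched object from byte 1000, skipping the zero-filled gap
+ # created by --offset)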
+ cleanup
+
+ rm -rf ./rados_object_10k ./rados_object_10k_out ./rados_object_offset_0_out ./rados_object_offset_1000_out
+}
+
+function test_stat()
+{
+ bluestore=$("$CEPH_TOOL" osd metadata | grep '"osd_objectstore": "bluestore"' | cut -f1)
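+ # non-empty iff at least one OSD reports bluestore; the expected STORED
+ # figures below differ between object stores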
+ # create file in local fs
+ dd if=/dev/urandom of=rados_object_128k bs=64K count=2
+
+ # rados df test (replicated_pool):
+ $RADOS_TOOL purge $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $POOL $POOL --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool create $POOL 8
+ $CEPH_TOOL osd pool set $POOL size 3
+
+ # put object with 1 MB gap in front
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=1.1
+ STORED_UNIT="MiB"
+ else
+ STORED=384
+ STORED_UNIT="KiB"
+ fi
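+ # (hedged reading of the expected figures: bluestore accounts only the
+ # written extents, so 128 KiB x 3 replicas = 384 KiB; pre-bluestore stats
+ # count the logical object size, 1 MiB hole included, hence ~1.1 MiB)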
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)  # || true: no match must not abort under set -e
+ [[ -z $IN ]] && sleep 1 && continue
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ [[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds"
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # overwrite data at 1MB offset
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=1.1
+ STORED_UNIT="MiB"
+ else
+ STORED=384
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # write data at 64K offset
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=65536
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=1.1
+ STORED_UNIT="MiB"
+ else
+ STORED=768
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 384 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 384 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # overwrite object totally
+ $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=128
+ STORED_UNIT="KiB"
+ else
+ STORED=384
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies replication factor 3
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "4" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "4" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ cleanup
+
+ # after cleanup?
+ MATCH_CNT=0
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL df | grep $POOL || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies replication factor 3
+ if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "5" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "5" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ ############ rados df test (EC pool): ##############
+ $RADOS_TOOL purge $POOL_EC --yes-i-really-really-mean-it
+ $CEPH_TOOL osd pool rm $POOL_EC $POOL_EC --yes-i-really-really-mean-it
+ $CEPH_TOOL osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force
+ $CEPH_TOOL osd pool create $POOL_EC 8 8 erasure myprofile
+
+ # put object
+ $RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=128
+ STORED_UNIT="KiB"
+ else
+ STORED=192
+ STORED_UNIT="KiB"
+ fi
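+ # (hedged arithmetic: with k=2, m=1 the parity overhead is 1.5x, so the
+ # 128 KiB object is expected to occupy 128 x 1.5 = 192 KiB on bluestore)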
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC || true)
+ [[ -z $IN ]] && sleep 1 && continue
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies EC 2+1 (k=2, m=1: 3 shards per object)
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ [[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds"
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ # overwrite object
+ $RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k
+ MATCH_CNT=0
+ if [ "" == "$bluestore" ];
+ then
+ STORED=128
+ STORED_UNIT="KiB"
+ else
+ STORED=192
+ STORED_UNIT="KiB"
+ fi
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies EC 2+1 (k=2, m=1: 3 shards per object)
+ if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ cleanup
+
+ # after cleanup?
+ MATCH_CNT=0
+ for i in {1..60}
+ do
+ IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC || true)
+ IFS=' ' read -ra VALS <<< "$IN"
+
+ # verification is a bit tricky because pool stats are reported with
+ # eventual consistency
+ # VALS[1] - STORED
+ # VALS[2] - STORED units
+ # VALS[3] - OBJECTS
+ # VALS[5] - COPIES
+ # VALS[12] - WR_OPS
+ # VALS[13] - WR
+ # VALS[14] - WR units
+ # implies EC 2+1 (k=2, m=1: 3 shards per object)
+ if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ]
+ then
+ # enforce multiple match to make sure stats aren't changing any more
+ MATCH_CNT=$((MATCH_CNT+1))
+ [[ $MATCH_CNT == 3 ]] && break
+ sleep 1
+ continue
+ fi
+ MATCH_CNT=0
+ sleep 1
+ continue
+ done
+ if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ]
+ then
+ die "Failed to retrieve proper pool stats within 60 seconds"
+ fi
+
+ rm -rf ./rados_object_128k
+}
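+
+ # All of the checks in test_stat share one polling pattern: sample
+ # `rados df` once a second for up to 60s and require three consecutive
+ # matches before trusting the eventually consistent figures. A reusable
+ # sketch of that pattern (defined for illustration only, never called):
+ wait_for_stable_stats() {
+     local matches=0 i
+     for i in {1..60}; do
+         if "$@"; then                  # "$@": any predicate command
+             matches=$((matches + 1))
+             [ "$matches" -eq 3 ] && return 0
+         else
+             matches=0                  # stats changed; start over
+         fi
+         sleep 1
+     done
+     return 1
+ }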
+
+test_xattr
+test_omap
+test_rmobj
+test_ls
+test_cleanup
+test_append
+test_put
+test_stat
+
+# clean up environment, delete pool
+$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it
+$CEPH_TOOL osd pool delete $POOL_EC $POOL_EC --yes-i-really-really-mean-it
+$CEPH_TOOL osd pool delete $POOL_CP_TARGET $POOL_CP_TARGET --yes-i-really-really-mean-it
+
+echo "SUCCESS!"
+exit 0