Diffstat (limited to 'ctdb/tests/INTEGRATION/database')
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.001.attach.sh | 48
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.002.attach.sh | 116
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.003.detach.sh | 166
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.004.wipe.sh | 56
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh | 97
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/fetch.001.ring.sh | 34
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh | 161
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/readonly.001.basic.sh | 178
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh | 118
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.002.large.sh | 106
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh | 63
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh | 103
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/recovery.011.continue.sh | 73
-rw-r--r--  ctdb/tests/INTEGRATION/database/scripts/local.bash | 116
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh | 110
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.002.loop.sh | 28
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh | 50
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh | 80
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh | 51
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/traverse.001.one.sh | 116
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/traverse.002.many.sh | 52
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh | 159
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.002.full.sh | 96
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh | 139
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh | 102
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh | 114
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh | 102
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh | 117
-rwxr-xr-x  ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh | 129
29 files changed, 2880 insertions(+), 0 deletions(-)
diff --git a/ctdb/tests/INTEGRATION/database/basics.001.attach.sh b/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
new file mode 100755
index 0000000..1fbffc5
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/basics.001.attach.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Verify that 'ctdb getdbmap' operates as expected
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+select_test_node
+
+# test_node set by select_test_node() above
+# shellcheck disable=SC2154
+ctdb_onnode -v "$test_node" getdbmap
+
+dbid='dbid:0x[[:xdigit:]]+'
+name='name:[^[:space:]]+'
+path='path:[^[:space:]]+'
+opts='( (PERSISTENT|STICKY|READONLY|REPLICATED|UNHEALTHY))*'
+line="${dbid} ${name} ${path}${opts}"
+dbmap_pattern="^(Number of databases:[[:digit:]]+|${line})\$"
+
+# outfile set by ctdb_onnode() above
+# shellcheck disable=SC2154
+num_db_init=$(sed -n -e '1s/.*://p' "$outfile")
+
+sanity_check_output $(($num_db_init + 1)) "$dbmap_pattern"
+
+for i in $(seq 1 5) ; do
+ f="attach_test_${i}.tdb"
+ echo "Creating test database: $f"
+ ctdb_onnode "$test_node" "attach ${f}"
+
+ ctdb_onnode "$test_node" getdbmap
+ sanity_check_output $((num_db_init + 1)) "$dbmap_pattern"
+ num=$(sed -n -e '1s/^.*://p' "$outfile")
+ if [ "$num" = $((num_db_init + i)) ] ; then
+ echo "OK: correct number of additional databases"
+ else
+ ctdb_test_fail "BAD: incorrect number of additional databases"
+ fi
+ if awk '{print $2}' "$outfile" | grep -Fqx "name:$f" ; then
+ echo "OK: getdbmap knows about \"$f\""
+ else
+ ctdb_test_fail "BAD: getdbmap does not know about \"$f\""
+ fi
+done
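
For reference, the dbmap_pattern above is built to match 'ctdb getdbmap' output of
roughly the following shape (the dbid values and paths here are illustrative only;
they vary per cluster and install):

    Number of databases:2
    dbid:0x6645c6c4 name:registry.tdb path:/var/lib/ctdb/persistent/registry.tdb.0 PERSISTENT
    dbid:0x7a19d84d name:attach_test_1.tdb path:/var/lib/ctdb/volatile/attach_test_1.tdb.0

The sed expression then strips everything up to the colon on the first line to get
the database count.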
diff --git a/ctdb/tests/INTEGRATION/database/basics.002.attach.sh b/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
new file mode 100755
index 0000000..6a5c812
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/basics.002.attach.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# Verify that databases are attached when a node joins the cluster:
+# 1. Shut down CTDB on one node
+# 2. Attach test databases
+# 3. Check that databases are attached on all up nodes
+# 4. Start CTDB on the node where it was shut down
+# 5. Verify that the test databases are attached on this node
+# 6. Restart one of the nodes
+# 7. Verify that the test databases are attached on this node
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+######################################################################
+
+try_command_on_node 0 "$CTDB listnodes -X | wc -l"
+numnodes="$out"
+lastnode=$(( numnodes - 1 ))
+
+######################################################################
+
+# Confirm that the database is attached
+check_db_once ()
+{
+ local pnn="$1"
+ local db="$2"
+
+ try_command_on_node "$pnn" $CTDB getdbmap
+ if grep -qF "name:${db}" "$outfile" ; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+check_db ()
+{
+ local pnn="$1"
+ local db="$2"
+ local flag="$3"
+
+ local flags
+
+ echo "Waiting until database ${db} is attached on node ${pnn}"
+ wait_until 10 check_db_once "$pnn" "$db"
+
+ flags=$(awk -v db="$db" '$2 == "name:" db {print $4}' "$outfile")
+ if [ "$flags" = "$flag" ]; then
+ echo "GOOD: db ${db} attached on node ${pnn} with flag $flag"
+ else
+ echo "BAD: db ${db} attached on node ${pnn} with wrong flag"
+ cat "$outfile"
+ exit 1
+ fi
+}
+
+######################################################################
+
+testdb1="test_volatile.tdb"
+testdb2="test_persistent.tdb"
+testdb3="test_replicated.tdb"
+
+test_node="0"
+
+echo "Shutting down node $test_node"
+ctdb_nodes_stop "$test_node"
+sleep 1
+wait_until_node_has_status 1 recovered
+try_command_on_node -v 1 $CTDB status
+
+echo "Create test databases"
+try_command_on_node 1 $CTDB attach "$testdb1"
+try_command_on_node 1 $CTDB attach "$testdb2" persistent
+try_command_on_node 1 $CTDB attach "$testdb3" replicated
+
+echo
+echo "Checking if database is attached with correct flags"
+for node in $(seq 0 $lastnode) ; do
+ if [ $node -ne $test_node ] ; then
+ check_db $node $testdb1 ""
+ check_db $node $testdb2 PERSISTENT
+ check_db $node $testdb3 REPLICATED
+ fi
+done
+
+######################################################################
+
+echo
+echo "Start node $test_node"
+ctdb_nodes_start "$test_node"
+sleep 1
+wait_until_ready
+
+echo
+echo "Checking if database is attached with correct flags"
+check_db $test_node $testdb1 ""
+check_db $test_node $testdb2 PERSISTENT
+check_db $test_node $testdb3 REPLICATED
+
+######################################################################
+
+echo
+echo "Restarting node $test_node"
+ctdb_nodes_restart "$test_node"
+sleep 1
+wait_until_ready
+
+echo
+echo "Checking if database is attached with correct flags"
+check_db $test_node $testdb1 ""
+check_db $test_node $testdb2 PERSISTENT
+check_db $test_node $testdb3 REPLICATED
diff --git a/ctdb/tests/INTEGRATION/database/basics.003.detach.sh b/ctdb/tests/INTEGRATION/database/basics.003.detach.sh
new file mode 100755
index 0000000..cb44955
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/basics.003.detach.sh
@@ -0,0 +1,166 @@
+#!/usr/bin/env bash
+
+# Verify that 'ctdb detach' works as expected:
+# 1. Attach test databases
+# 2. Detach test databases
+# 3. Confirm test databases are not attached
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+######################################################################
+
+try_command_on_node 0 "$CTDB listnodes -X | wc -l"
+numnodes="$out"
+
+######################################################################
+
+# Confirm that the database is attached
+check_db_once ()
+{
+ local db="$1"
+
+ local num_db
+
+ try_command_on_node all "$CTDB getdbmap"
+ num_db=$(grep -cF "name:${db}" "$outfile") || true
+ if [ "$num_db" -eq "$numnodes" ]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+check_db ()
+{
+ local db="$1"
+
+ echo "Waiting until database ${db} is attached on all nodes"
+ wait_until 10 check_db_once "$db"
+}
+
+# Confirm that no nodes have databases attached
+check_no_db_once ()
+{
+ local db="$1"
+
+ local num_db
+
+ try_command_on_node all "$CTDB getdbmap"
+ num_db=$(grep -cF "name:${db}" "$outfile") || true
+ if [ "$num_db" -eq 0 ]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+check_no_db ()
+{
+ local db="$1"
+
+ echo "Waiting until database ${db} is detached on all nodes"
+ wait_until 10 check_no_db_once "$db"
+}
+
+######################################################################
+
+testdb1="detach_test1.tdb"
+testdb2="detach_test2.tdb"
+testdb3="detach_test3.tdb"
+testdb4="detach_test4.tdb"
+
+echo "Create test databases"
+for db in "$testdb1" "$testdb2" "$testdb3" "$testdb4" ; do
+ echo " $db"
+ try_command_on_node 0 $CTDB attach "$db"
+done
+
+for db in "$testdb1" "$testdb2" "$testdb3" "$testdb4" ; do
+ check_db "$db"
+done
+
+######################################################################
+
+echo
+echo "Ensuring AllowClientDBAttach=1 on all nodes"
+try_command_on_node all $CTDB setvar AllowClientDBAttach 1
+
+echo "Check failure detaching single test database $testdb1"
+try_command_on_node 1 "! $CTDB detach $testdb1"
+check_db "$testdb1"
+
+echo
+echo "Setting AllowClientDBAttach=0 on node 0"
+try_command_on_node 0 $CTDB setvar AllowClientDBAttach 0
+
+echo "Check failure detaching single test database $testdb1"
+try_command_on_node 1 "! $CTDB detach $testdb1"
+check_db "$testdb1"
+
+echo
+echo "Setting AllowClientDBAttach=0 on all nodes"
+try_command_on_node all $CTDB setvar AllowClientDBAttach 0
+
+echo "Check detaching single test database $testdb1"
+try_command_on_node 1 "$CTDB detach $testdb1"
+check_no_db "$testdb1"
+
+######################################################################
+
+echo
+echo "Detach multiple test databases"
+echo " $testdb2, $testdb3, $testdb4"
+try_command_on_node 0 $CTDB detach $testdb2 $testdb3 $testdb4
+
+for db in "$testdb2" "$testdb3" "$testdb4" ; do
+ check_no_db "$db"
+done
+
+######################################################################
+
+echo
+echo "Attach a single test database"
+try_command_on_node all $CTDB setvar AllowClientDBAttach 1
+try_command_on_node 0 $CTDB attach $testdb1
+check_db "$testdb1"
+
+echo
+echo "Write a key to database"
+try_command_on_node 0 $CTDB writekey $testdb1 foo bar
+try_command_on_node 0 $CTDB catdb $testdb1
+num_keys=$(sed -n -e 's/Dumped \([0-9]*\) records/\1/p' "$outfile") || true
+if [ -n "$num_keys" -a $num_keys -eq 1 ]; then
+ echo "GOOD: Key added to database"
+else
+ echo "BAD: Key did not get added to database"
+ cat "$outfile"
+ exit 1
+fi
+
+echo
+echo "Detach test database"
+try_command_on_node all $CTDB setvar AllowClientDBAttach 0
+try_command_on_node 0 $CTDB detach $testdb1
+check_no_db "$testdb1"
+
+echo
+echo "Re-attach test database"
+try_command_on_node all $CTDB setvar AllowClientDBAttach 1
+try_command_on_node 0 $CTDB attach $testdb1
+check_db "$testdb1"
+
+echo
+echo "Check if the database is empty"
+try_command_on_node 0 $CTDB catdb $testdb1
+num_keys=$(sed -n -e 's/Dumped \([0-9]*\) records/\1/p' "$outfile") || true
+if [ -n "$num_keys" -a $num_keys -eq 0 ]; then
+ echo "GOOD: Database $testdb1 is empty"
+else
+ echo "BAD: Database $testdb1 is not empty"
+ cat "$outfile"
+ exit 1
+fi
diff --git a/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh b/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh
new file mode 100755
index 0000000..115d64c
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/basics.004.wipe.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+# Verify that 'ctdb wipedb' can clear a persistent database:
+# 1. Verify that the status on all of the ctdb nodes is 'OK'.
+# 2. Create a persistent test database
+# 3. Add some records to node 0 and node 1
+# 4. Run wipedb on node 0
+# 5. verify the database is empty on both node 0 and 1
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+# 2.
+test_db="persistent_test.tdb"
+echo "Create persistent test database \"$test_db\""
+try_command_on_node 0 $CTDB attach "$test_db" persistent
+
+# 3.
+# add one record to node 0 key==ABC data==ABC
+echo "Store key(ABC) data(ABC) on node 0"
+db_ctdb_tstore 0 "$test_db" "ABC" "ABC"
+
+# add one record to node 1 key==DEF data==DEF
+echo "Store key(DEF) data(DEF) on node 1"
+db_ctdb_tstore 1 "$test_db" "DEF" "DEF"
+
+# 4.
+echo "Wipe database"
+try_command_on_node 0 $CTDB wipedb "$test_db"
+
+# check that the database is wiped
+num_records=$(db_ctdb_cattdb_count_records 1 "$test_db")
+if [ $num_records = "0" ] ; then
+ echo "OK: Database was wiped"
+else
+ echo "BAD: We did not end up with an empty database"
+ exit 1
+fi
+
+echo "Force a recovery"
+try_command_on_node 0 $CTDB recover
+
+# check that the database is wiped
+num_records=$(db_ctdb_cattdb_count_records 1 "$test_db")
+if [ $num_records = "0" ] ; then
+ echo "OK: Database was wiped"
+else
+ echo "BAD: We did not end up with an empty database"
+ exit 1
+fi
diff --git a/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh b/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh
new file mode 100755
index 0000000..8c469d4
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/basics.010.backup_restore.sh
@@ -0,0 +1,97 @@
+#!/usr/bin/env bash
+
+# Confirm that 'ctdb restoredb' works correctly:
+# 1. Check cluster status and note the current generation
+# 2. Create a persistent test database
+# 3. Add some records to test database
+# 4. Backup database
+# 5. Wipe database and verify the database is empty on all nodes
+# 6. Restore database and make sure all the records are restored
+# 7. Make sure no recovery has been triggered
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+try_command_on_node 0 $CTDB status
+generation=$(sed -n -e 's/^Generation:\([0-9]*\)/\1/p' "$outfile")
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+# 2.
+test_db="restoredb_test.tdb"
+test_dump=$(mktemp)
+echo "Using backup file $test_dump"
+echo "Create persistent test database \"$test_db\""
+try_command_on_node 0 $CTDB attach "$test_db" persistent
+try_command_on_node 0 $CTDB wipedb "$test_db"
+
+# 3.
+# add 10,000 records to database
+echo "Adding 10000 records to database"
+(
+for i in $(seq 1 10000) ; do
+ echo "\"key$i\" \"value$i\""
+done
+) | try_command_on_node -i 0 $CTDB ptrans "$test_db"
+
+num_records=$(db_ctdb_cattdb_count_records 1 "$test_db")
+if [ $num_records = "10000" ] ; then
+ echo "OK: Records added"
+else
+ echo "BAD: We did not end up with 10000 records"
+ echo "num records = $num_records"
+ exit 1
+fi
+
+ctdb_test_exit_hook_add "rm -f $test_dump"
+
+# 4.
+echo "Backup database"
+try_command_on_node 0 $CTDB backupdb "$test_db" "$test_dump"
+
+# 5.
+echo "Wipe database"
+try_command_on_node 0 $CTDB wipedb "$test_db"
+
+# check that the database is now empty
+num_records=$(db_ctdb_cattdb_count_records 1 "$test_db")
+if [ $num_records = "0" ] ; then
+ echo "OK: Database was wiped"
+else
+ echo "BAD: We did not end up with an empty database"
+ echo "num records = $num_records"
+ exit 1
+fi
+
+# 6.
+echo "Restore database"
+try_command_on_node 0 $CTDB restoredb "$test_dump" "$test_db"
+
+# check that the database is restored
+num_records=$(db_ctdb_cattdb_count_records 1 "$test_db")
+if [ $num_records = "10000" ] ; then
+ echo "OK: Database was restored"
+else
+ echo "BAD: We did not end up with 10000 records"
+ echo "num records = $num_records"
+ exit 1
+fi
+
+# 7.
+wait_until_ready
+
+try_command_on_node 0 $CTDB status
+new_generation=$(sed -n -e 's/^Generation:\([0-9]*\)/\1/p' "$outfile")
+
+echo "Old generation = $generation"
+echo "New generation = $new_generation"
+
+if [ "$generation" = "$new_generation" ]; then
+ echo "OK: Database recovery not triggered."
+else
+ echo "BAD: Database recovery triggered."
+ exit 1
+fi
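
As a side note, the generation comparison above relies on 'ctdb status' printing a
line of roughly this form (the value is illustrative), which the sed expression
reduces to the bare number:

    Generation:1632083709

A changed generation between the two status calls would mean a recovery ran during
the backup/restore sequence, which is exactly what step 7 rules out.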
diff --git a/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh b/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh
new file mode 100755
index 0000000..4d7d392
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/fetch.001.ring.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Run the fetch_ring test and sanity check the output
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+echo "Running fetch_ring on all $num_nodes nodes."
+testprog_onnode -v -p all \
+ fetch_ring -n "$num_nodes" -D "fetch_ring.tdb" -k "testkey"
+
+pat='^(Waiting for cluster|Fetch\[[[:digit:]]+\]: [[:digit:]]+(\.[[:digit:]]+)? msgs/sec)$'
+sanity_check_output 1 "$pat"
+
+# Get the last line of output.
+last=$(tail -n 1 "$outfile")
+
+# $last should look like this:
+# Fetch[1]: 10670.93 msgs/sec
+stuff="${last##*Fetch\[*\]: }"
+mps="${stuff% msgs/sec*}"
+
+if [ ${mps%.*} -ge 10 ] ; then
+ echo "OK: $mps msgs/sec >= 10 msgs/sec"
+else
+ echo "BAD: $mps msgs/sec < 10 msgs/sec"
+ exit 1
+fi
diff --git a/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh b/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh
new file mode 100755
index 0000000..6d44253
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/fetch.002.ring-hotkeys.sh
@@ -0,0 +1,161 @@
+#!/usr/bin/env bash
+
+# Run the fetch_ring test, sanity check the output and check hot keys
+# statistics
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+testdb="fetch_ring.tdb"
+
+ctdb_get_all_pnns
+# $all_pnns is set above
+# shellcheck disable=SC2154
+num_nodes=$(echo "$all_pnns" | wc -w | tr -d '[:space:]')
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+get_key ()
+{
+ _n="$1"
+
+ echo "testkey${_n}"
+}
+
+run_fetch_ring ()
+{
+ _timelimit="$1"
+ _key_num="$2"
+
+ _key=$(get_key "$_key_num")
+ _base_cmd="fetch_ring -n ${num_nodes} -D ${testdb}"
+ _cmd="${_base_cmd} -t ${_timelimit} -k ${_key}"
+ echo "Running \"${_cmd}\" on all $num_nodes nodes."
+ testprog_onnode -v -p all "$_cmd"
+
+ _pat='^(Waiting for cluster|Fetch\[[[:digit:]]+\]: [[:digit:]]+(\.[[:digit:]]+)? msgs/sec)$'
+ sanity_check_output 1 "$_pat"
+
+ # Get the last line of output.
+ # $outfile is set above by testprog_onnode()
+ # shellcheck disable=SC2154
+ _last=$(tail -n 1 "$outfile")
+
+ # $last should look like this:
+ # Fetch[1]: 10670.93 msgs/sec
+ _stuff="${_last##*Fetch\[*\]: }"
+ _mps="${_stuff% msgs/sec*}"
+
+ if [ "${_mps%.*}" -ge 10 ] ; then
+ echo "OK: ${_mps} msgs/sec >= 10 msgs/sec"
+ else
+ ctdb_test_fail "BAD: ${_mps} msgs/sec < 10 msgs/sec"
+ fi
+}
+
+check_hot_keys ()
+{
+ _pnn="$1"
+ _first_key="$2"
+ _num_keys="$3"
+
+ echo
+ echo "Checking hot keys on node ${_pnn}"
+
+ ctdb_onnode "$_pnn" dbstatistics "$testdb"
+
+ # Get hot keys with a non-empty key
+ _hotkeys=$(grep -Ex '[[:space:]]+Count:[[:digit:]]+ Key:[[:xdigit:]]+' \
+ "$outfile") || true
+
+ # Check that there are the right number of non-empty slots
+ if [ -z "$_hotkeys" ] ; then
+ _num=0
+ else
+ _num=$(echo "$_hotkeys" | wc -l | tr -d '[:space:]')
+ fi
+ _msg="hot key slots in use = ${_num}"
+ if [ "$_num_keys" -ne "$_num" ] ; then
+ echo
+ cat "$outfile"
+ ctdb_test_fail "BAD: ${_msg} (expected ${_num_keys})"
+ fi
+ echo "GOOD: ${_msg}"
+
+ # No hot keys? Done...
+ if [ "$_num" = 0 ] ; then
+ return
+ fi
+
+ # Check that hot key counts are correctly sorted
+ #
+ # Try to be as POSIX as possible
+ # shellcheck disable=SC2001
+ _counts=$(echo "$_hotkeys" | \
+ sed -e 's|.*Count:\([[:digit:]][[:digit:]]*\).*|\1|')
+ _counts_sorted=$(echo "$_counts" | sort -n)
+ if [ "$_counts" != "$_counts_sorted" ] ; then
+ echo
+ cat "$outfile"
+ ctdb_test_fail "BAD: hot keys not sorted"
+ fi
+ echo "GOOD: hot key counts are correctly sorted"
+
+ # Check that all keys are considered hot
+ for _j in $(seq "$_first_key" $((_first_key + _num_keys - 1))) ; do
+ _key=$(get_key "$_j")
+ _key_hex=$(printf '%s' "$_key" | \
+ od -A n -t x1 | \
+ tr -d '[:space:]')
+ if ! echo "$_hotkeys" | grep -q "Key:${_key_hex}\$" ; then
+ echo
+ cat "$outfile"
+ ctdb_test_fail "BAD: key \"${_key}\" is not a hot key"
+ fi
+ done
+ echo "GOOD: all keys are listed as hot keys"
+}
+
+# Run fetch_ring for each of 10 keys. After each run confirm that all
+# keys used so far are considered hot keys (and do other hot key
+# sanity checks) on all nodes.
+for i in $(seq 1 10) ; do
+ run_fetch_ring 5 "$i"
+
+ for pnn in $all_pnns ; do
+ check_hot_keys "$pnn" 1 "$i"
+ done
+
+ echo
+done
+
+echo
+echo "Resetting statistics on node ${first}"
+ctdb_onnode "$first" statisticsreset
+
+# Ensure that only node $first has had statistics reset
+for pnn in $all_pnns ; do
+ if [ "$pnn" = "$first" ] ; then
+ check_hot_keys "$pnn" 1 0
+ else
+ check_hot_keys "$pnn" 1 10
+ fi
+done
+
+echo
+
+# Run fetch_ring for each of 3 new keys. After each run confirm that
+# the new keys used so far are considered hot keys (and do other hot
+# key sanity checks) on node $first.
+#
+# Note that nothing can be said about hot keys on other nodes, since
+# they may be an arbitrary blend of old and new keys.
+for i in $(seq 1 3) ; do
+ run_fetch_ring 5 $((100 + i))
+
+ check_hot_keys "$first" 101 "$i"
+
+ echo
+done
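
For reference, the Key: fields in 'ctdb dbstatistics' hot-key output are hex encoded,
which is why check_hot_keys() converts each key with od before grepping for it. The
same conversion can be checked by hand; for the literal key "testkey1" it produces:

    $ printf '%s' "testkey1" | od -A n -t x1 | tr -d '[:space:]'
    746573746b657931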
diff --git a/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh b/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh
new file mode 100755
index 0000000..aeb9740
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/readonly.001.basic.sh
@@ -0,0 +1,178 @@
+#!/usr/bin/env bash
+
+# Test support for read-only records
+
+# Read-only records can be activated at runtime using a ctdb command.
+# If read-only records are not activated, then any attempt to fetch a
+# read-only copy should be automatically upgraded to a read-write
+# fetch_locked().
+
+# If read-only delegations are present, then any attempt to acquire a
+# read-write fetch_lock will trigger revocation of all delegations
+# before the fetch_locked().
+
+# 1. Create a test database and some records
+# 2. Try to fetch read-only records, this should not result in any delegations
+# 3. Activate read-only support
+# 4. Try to fetch read-only records, this should result in delegations
+# 5. Do a fetchlock and the delegations should be revoked
+# 6. Try to fetch read-only records, this should result in delegations
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+######################################################################
+
+# Confirm that no nodes have databases with read-only delegations
+check_no_readonly ()
+{
+ try_command_on_node all $CTDB cattdb $testdb
+ local ro_flags="RO_HAVE_READONLY|RO_HAVE_DELEGATIONS"
+ local numreadonly=$(grep -c -E "$ro_flags" "$outfile") || true
+ if [ $numreadonly -eq 0 ] ; then
+ echo "GOOD: no read-only delegations"
+ else
+ echo "BAD: there are read-only delegations"
+ cat "$outfile"
+ exit 1
+ fi
+}
+
+# Check that the test record has the correct read-only flags on the
+# given nodes. The first node is the dmaster, which should know there
+# are delegations but should not be flagged as having a read-only
+# copy. Subsequent nodes should have a read-only copy but not know
+# about any (other) delegations.
+check_readonly ()
+{
+ local dmaster="$1" ; shift
+ local others="$*"
+
+ local count
+
+ try_command_on_node $dmaster $CTDB cattdb $testdb
+ count=$(grep -c -E "RO_HAVE_DELEGATIONS" "$outfile") || true
+ if [ $count -eq 1 ] ; then
+ echo "GOOD: dmaster ${dmaster} has read-only delegations"
+ else
+ echo "BAD: dmaster ${dmaster} has no read-only delegations"
+ cat "$outfile"
+ exit 1
+ fi
+ count=$(grep -c -E "RO_HAVE_READONLY" "$outfile") || true
+ if [ $count -ne 0 ] ; then
+ echo "BAD: dmaster ${dmaster} has a read-only copy"
+ cat "$outfile"
+ exit 1
+ fi
+
+ local o
+ for o in $others ; do
+ try_command_on_node $o $CTDB cattdb $testdb
+ count=$(grep -c -E "RO_HAVE_READONLY" "$outfile") || true
+ if [ $count -eq 1 ] ; then
+ echo "GOOD: node ${o} has a read-only copy"
+ else
+ echo "BAD: node ${o} has no read-only copy"
+ cat "$outfile"
+ exit 1
+ fi
+ count=$(grep -c -E "RO_HAVE_DELEGATIONS" "$outfile") || true
+ if [ $count -ne 0 ] ; then
+ echo "BAD: other node ${o} has read-only delegations"
+ cat "$outfile"
+ exit 1
+ fi
+ done
+}
+
+######################################################################
+
+echo "Get list of nodes..."
+ctdb_onnode 0 "-X listnodes"
+all_nodes=$(awk -F'|' '{print $2}' "$outfile")
+
+######################################################################
+
+testdb="test.tdb"
+echo "Create test database \"${testdb}\""
+try_command_on_node 0 $CTDB attach $testdb
+
+echo "Create some records..."
+try_command_on_node all $CTDB_TEST_WRAPPER $VALGRIND update_record \
+ -D ${testdb} -k testkey
+
+######################################################################
+
+echo "Try some readonly fetches, these should all be upgraded to full fetchlocks..."
+try_command_on_node all $CTDB_TEST_WRAPPER $VALGRIND fetch_readonly \
+ -D ${testdb} -k testkey
+
+check_no_readonly
+
+######################################################################
+
+echo "Activate read-only record support for \"$testdb\"..."
+try_command_on_node all $CTDB setdbreadonly $testdb
+
+# Database should be tagged as READONLY
+try_command_on_node 0 $CTDB getdbmap
+db_details=$(awk -v db="$testdb" '$2 == "name:" db { print }' "$outfile")
+if grep -q "READONLY" <<<"$db_details" ; then
+ echo "GOOD: read-only record support is enabled"
+else
+ echo "BAD: could not activate read-only support"
+ echo "$db_details"
+ exit 1
+fi
+
+######################################################################
+
+echo "Create 1 read-only delegation ..."
+# dmaster=1
+try_command_on_node 1 $CTDB_TEST_WRAPPER $VALGRIND update_record \
+ -D ${testdb} -k testkey
+
+# Fetch read-only to node 0
+try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND fetch_readonly \
+ -D ${testdb} -k testkey
+
+check_readonly 1 0
+
+######################################################################
+
+echo "Verify that a fetchlock revokes read-only delegations..."
+# Node 1 becomes dmaster
+try_command_on_node 1 $CTDB_TEST_WRAPPER $VALGRIND update_record \
+ -D ${testdb} -k testkey
+
+check_no_readonly
+
+######################################################################
+
+echo "Create more read-only delegations..."
+dmaster=1
+try_command_on_node $dmaster $CTDB_TEST_WRAPPER $VALGRIND update_record \
+ -D ${testdb} -k testkey
+
+others=""
+for n in $all_nodes ; do
+ if [ "$n" != "$dmaster" ] ; then
+ # Fetch read-only copy to this node
+ try_command_on_node $n $CTDB_TEST_WRAPPER $VALGRIND fetch_readonly \
+ -D ${testdb} -k testkey
+ others="${others} ${n}"
+ fi
+done
+
+check_readonly $dmaster $others
+
+######################################################################
+
+echo "Verify that a recovery will revoke the delegations..."
+try_command_on_node 0 $CTDB recover
+
+check_no_readonly
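
As a rough manual equivalent of check_no_readonly() above, the same per-record flags
can be inspected directly (assuming a running cluster and the onnode helper; any
output means some node still holds a read-only copy or delegations):

    onnode all ctdb cattdb test.tdb | grep -E 'RO_HAVE_READONLY|RO_HAVE_DELEGATIONS'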
diff --git a/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
new file mode 100755
index 0000000..d7aaa3b
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/recovery.001.volatile.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+
+# Test that recovery correctly handles RSNs
+
+# Recovery can under certain circumstances lead to old record copies
+# resurrecting: Recovery selects the newest record copy purely by RSN. At
+# the end of the recovery, the leader is the dmaster for all
+# records in all (non-persistent) databases. And the other nodes locally
+# hold the complete copy of the databases. The bug is that the recovery
+# process does not increment the RSN on the leader at the end of
+# the recovery. Now clients acting directly on the leader will
+# directly change a record's content on the leader without migration
+# and hence without RSN bump. So a subsequent recovery can not tell that
+# the leader's copy is newer than the copies on the other nodes, since
+# their RSN is the same. Hence, if the leader is not node 0 (or more
+# precisely not the active node with the lowest node number), the recovery
+# will choose copies from nodes with lower number and stick to these.
+
+# 1. Create a test database
+# 2. Add a record with value value1 on leader
+# 3. Force a recovery
+# 4. Update the record with value value2 on leader
+# 5. Force a recovery
+# 6. Confirm that the value is value2
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+#
+# Main test
+#
+TESTDB="rec_test.tdb"
+
+status=0
+
+# Make sure node 0 is not the leader
+echo "find out which node is leader"
+ctdb_onnode 0 leader
+leader="$out"
+if [ "$leader" = "0" ]; then
+ echo "node 0 is leader, disable leader role on node 0"
+ #
+ # Note:
+ # It should be sufficient to run "ctdb setleaderrole off"
+ # on node 0 and wait for election and recovery to finish.
+ # But there were problems related to this in this automatic
+ # test, so for now use "ctdb stop" and "ctdb continue".
+ #
+ echo "stop node 0"
+ try_command_on_node 0 $CTDB stop
+ wait_until_node_has_status 0 stopped
+ echo "continue node 0"
+ try_command_on_node 0 $CTDB continue
+ wait_until_node_has_status 0 notstopped
+
+ ctdb_onnode 0 leader
+ leader="$out"
+ if [ "$leader" = "0" ]; then
+ echo "failed to move leader to different node"
+ exit 1
+ fi
+fi
+
+echo "Leader:${leader}"
+
+# Create a temporary non-persistent database to test with
+echo "create test database $TESTDB"
+ctdb_onnode "$leader" attach "$TESTDB"
+
+# Wipe Test database
+echo "wipe test database"
+ctdb_onnode "$leader" wipedb "$TESTDB"
+
+# Add a record key=test1 data=value1
+echo "store key(test1) data(value1)"
+ctdb_onnode "$leader" writekey "$TESTDB" test1 value1
+
+# Fetch a record key=test1
+echo "read key(test1)"
+ctdb_onnode "$leader" readkey "$TESTDB" test1
+cat "$outfile"
+
+# Do a recovery
+echo "force recovery"
+ctdb_onnode "$leader" recover
+
+wait_until_node_has_status "$leader" recovered
+
+# Add a record key=test1 data=value2
+echo "store key(test1) data(value2)"
+ctdb_onnode "$leader" writekey "$TESTDB" test1 value2
+
+# Fetch a record key=test1
+echo "read key(test1)"
+ctdb_onnode "$leader" readkey "$TESTDB" test1
+cat "$outfile"
+
+# Do a recovery
+echo "force recovery"
+ctdb_onnode "$leader" recover
+
+wait_until_node_has_status "$leader" recovered
+
+# Verify record key=test1
+echo "read key(test1)"
+ctdb_onnode "$leader" readkey "$TESTDB" test1
+cat "$outfile"
+if [ "$out" = "Data: size:6 ptr:[value2]" ]; then
+ echo "GOOD: Recovery did not corrupt database"
+else
+ echo "BAD: Recovery corrupted database"
+ status=1
+fi
+
+exit $status
diff --git a/ctdb/tests/INTEGRATION/database/recovery.002.large.sh b/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
new file mode 100755
index 0000000..4736071
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/recovery.002.large.sh
@@ -0,0 +1,106 @@
+#!/usr/bin/env bash
+
+# Test recovery of large volatile and persistent databases
+
+# Recovery now uses DB_PULL and DB_PUSH_START/DB_PUSH_CONFIRM
+# controls. This sends the records in batches of ~RecBufferSizeLimit
+# in size at a time. Test that large databases are re-assembled
+# correctly.
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+#
+# Main test
+#
+TEST1DB="large_persistent_db.tdb"
+TEST2DB="large_volatile_db.tdb"
+RECDATA=$(onnode 0 mktemp)
+
+# Create a persistent database to test
+echo "create persistent test database $TEST1DB"
+try_command_on_node 0 $CTDB attach $TEST1DB persistent
+
+# Wipe Test database
+echo "wipe test database $TEST1DB"
+try_command_on_node 0 $CTDB wipedb $TEST1DB
+
+# Create dummy record data
+echo "creating dummy record data"
+onnode 0 dd if=/dev/urandom of=$RECDATA bs=10K count=1
+
+# Add 345 records
+echo "Adding 345 records"
+for i in $(seq 1 345) ; do
+ try_command_on_node 0 $CTDB pstore $TEST1DB record$i $RECDATA || exit 1
+done
+
+num_records=$(db_ctdb_cattdb_count_records 0 $TEST1DB)
+if [ $num_records = "345" ] ; then
+ echo "OK: records added correctly"
+else
+ echo "BAD: persistent database has $num_records of 345 records"
+ try_command_on_node -v 0 "$CTDB cattdb $TEST1DB | tail -n 1"
+ exit 1
+fi
+
+# Create a volatile database to test
+echo "create volatile test database $TEST2DB"
+try_command_on_node 0 $CTDB attach $TEST2DB
+
+# Wipe Test database
+echo "wipe test database $TEST2DB"
+try_command_on_node 0 $CTDB wipedb $TEST2DB
+
+# Create dummy record data
+v1="1234567890"
+v2="$v1$v1$v1$v1$v1$v1$v1$v1$v1$v1"
+v3="$v2$v2$v2$v2$v2$v2$v2$v2$v2$v2"
+
+# Add 1234 records
+echo "Adding 1234 records"
+for i in $(seq 1 1234) ; do
+ try_command_on_node 0 $CTDB writekey $TEST2DB record$i $v3 || exit 1
+done
+
+num_records=$(db_ctdb_cattdb_count_records 0 $TEST2DB)
+if [ $num_records = "1234" ] ; then
+ echo "OK: records added correctly"
+else
+ echo "BAD: volatile database has $num_records of 1234 records"
+ try_command_on_node -v 0 "$CTDB cattdb $TEST2DB | tail -n 1"
+ exit 1
+fi
+
+echo
+leader_get 0
+# Set RecBufferSizeLimit to 10000
+ctdb_onnode "$leader" setvar RecBufferSizeLimit 10000
+
+# Do a recovery
+echo "force recovery"
+try_command_on_node 0 $CTDB recover
+
+wait_until_node_has_status 0 recovered 30
+
+# check that there are correct number of records
+num_records=$(db_ctdb_cattdb_count_records 0 $TEST1DB)
+if [ $num_records = "345" ] ; then
+ echo "OK: persistent database recovered correctly"
+else
+ echo "BAD: persistent database has $num_records of 345 records"
+ try_command_on_node -v 0 "$CTDB cattdb $TEST1DB | tail -n 1"
+ exit 1
+fi
+
+num_records=$(db_ctdb_cattdb_count_records 0 $TEST2DB)
+if [ $num_records = "1234" ] ; then
+ echo "OK: volatile database recovered correctly"
+else
+ echo "BAD: volatile database has $num_records of 1234 records"
+ try_command_on_node -v 0 "$CTDB cattdb $TEST2DB | tail -n 1"
+ exit 1
+fi
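
A rough sizing note on the batching described in the header comment: each volatile
record above carries about 1000 bytes of value data (v3 is 1000 characters), so with
RecBufferSizeLimit set to 10000 the 1234 records should be pushed in on the order of
a hundred or more batches, which is what makes this a useful re-assembly test. The
current limit on a node can be read back with, for example:

    ctdb getvar RecBufferSizeLimit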
diff --git a/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh b/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
new file mode 100755
index 0000000..b314d4d
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/recovery.003.no_resurrect.sh
@@ -0,0 +1,63 @@
+#!/usr/bin/env bash
+
+# Ensure recovery doesn't resurrect deleted records from recently
+# inactive nodes
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+testdb="rec_test.tdb"
+
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+first=$(echo "$all_pnns" | sed -n -e '1p')
+second=$(echo "$all_pnns" | sed -n -e '2p')
+notfirst=$(echo "$all_pnns" | tail -n +2)
+
+echo "Create/wipe test database ${testdb}"
+try_command_on_node $first $CTDB attach "$testdb"
+try_command_on_node $first $CTDB wipedb "$testdb"
+
+echo "store key(test1) data(value1)"
+try_command_on_node $first $CTDB writekey "$testdb" test1 value1
+
+echo "Migrate key(test1) to all nodes"
+try_command_on_node all $CTDB readkey "$testdb" test1
+
+echo "Stop node ${first}"
+try_command_on_node $first $CTDB stop
+wait_until_node_has_status $first stopped
+
+echo "Delete key(test1)"
+try_command_on_node $second $CTDB deletekey "$testdb" test1
+
+database_has_zero_records ()
+{
+ # shellcheck disable=SC2086
+ # $notfirst can be multi-word
+ check_cattdb_num_records "$testdb" 0 "$notfirst"
+}
+
+echo "Trigger a recovery"
+try_command_on_node "$second" $CTDB recover
+
+echo "Checking that database has 0 records"
+database_has_zero_records
+
+echo "Continue node ${first}"
+try_command_on_node $first $CTDB continue
+wait_until_node_has_status $first notstopped
+
+echo "Get database contents"
+try_command_on_node -v $first $CTDB catdb "$testdb"
+
+if grep -q '^key(' "$outfile" ; then
+ echo "BAD: Deleted record has been resurrected"
+ exit 1
+fi
+
+echo "GOOD: Deleted record is still gone"
diff --git a/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh b/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
new file mode 100755
index 0000000..d13a9a5
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/recovery.010.persistent.sh
@@ -0,0 +1,103 @@
+#!/usr/bin/env bash
+
+# Ensure that persistent databases are correctly recovered by database
+# sequence number
+#
+# 1. Create and wipe a persistent test database
+# 2. Directly add a single record to the database on each node
+# 3. Trigger a recover
+# 4. Ensure that the database contains only a single record
+#
+# Repeat but with sequence numbers set by hand on each node
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+add_record_per_node ()
+{
+ _i=0
+ while [ $_i -lt $num_nodes ] ; do
+ _k="KEY${_i}"
+ _d="DATA${_i}"
+ echo "Store key(${_k}) data(${_d}) on node ${_i}"
+ db_ctdb_tstore $_i "$test_db" "$_k" "$_d"
+ _i=$(($_i + 1))
+ done
+}
+
+test_db="persistent_test.tdb"
+echo "Create persistent test database \"$test_db\""
+try_command_on_node 0 $CTDB attach "$test_db" persistent
+
+# 3.
+# If there is no __db_sequence_number__ record, recover the whole database
+#
+
+echo
+echo "Test that recovery without __db_sequence_number__ does not blend the database"
+
+# wipe database
+echo "Wipe the test database"
+try_command_on_node 0 $CTDB wipedb "$test_db"
+
+add_record_per_node
+
+# force a recovery
+echo force a recovery
+try_command_on_node 0 $CTDB recover
+
+# Check that we now have 1 record on node 0
+num_records=$(db_ctdb_cattdb_count_records 0 "$test_db")
+if [ $num_records = "1" ] ; then
+ echo "OK: databases were not blended"
+else
+ echo "BAD: we did not end up with the expected single record after the recovery"
+ exit 1
+fi
+
+
+# 4.
+# If there is a __db_sequence_number__ record, recover the whole database
+#
+
+echo
+echo "Test that recovery with __db_sequence_number__ does not blend the database"
+
+# wipe database
+echo "Wipe the test database"
+try_command_on_node 0 $CTDB wipedb "$test_db"
+
+add_record_per_node
+
+echo "Add __db_sequence_number__==5 record to all nodes"
+pnn=0
+while [ $pnn -lt $num_nodes ] ; do
+ db_ctdb_tstore_dbseqnum $pnn "$test_db" 5
+ pnn=$(($pnn + 1))
+done
+
+echo "Set __db_sequence_number__ to 7 on node 0"
+db_ctdb_tstore_dbseqnum 0 "$test_db" 7
+
+echo "Set __db_sequence_number__ to 8 on node 1"
+db_ctdb_tstore_dbseqnum 1 "$test_db" 8
+
+
+# force a recovery
+echo force a recovery
+try_command_on_node 0 $CTDB recover
+
+# check that we still have only a single record on node 0
+num_records=$(db_ctdb_cattdb_count_records 0 "$test_db")
+if [ $num_records = "1" ] ; then
+ echo "OK: databases were not blended"
+else
+ echo "BAD: we did not end up with the expected single record after the recovery"
+ exit 1
+fi
diff --git a/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh b/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
new file mode 100755
index 0000000..3376e06
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/recovery.011.continue.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+
+# Confirm that the deleted records are not resurrected after recovery
+#
+# 1. Create a persistent database
+# 2. Add a record and update it a few times.
+# 3. Delete the record
+# 4. Use "ctdb stop" to stop one of the nodes
+# 5. Add a record with same key.
+# 6. Continue on the stopped node
+# 7. Confirm that the record still exists
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+do_test()
+{
+# Wipe Test database
+echo "wipe test database"
+try_command_on_node 0 $CTDB wipedb $TESTDB
+
+# Add a record key=test1 data=value1
+# and update values
+for value in value1 value2 value3 value4 value5 ; do
+ echo "store key(test1) data($value)"
+ echo "\"test1\" \"$value\"" | try_command_on_node -i 0 $CTDB ptrans "$TESTDB"
+done
+
+# Delete record
+echo "delete key(test1)"
+try_command_on_node 0 $CTDB pdelete $TESTDB test1
+
+# Stop a node
+echo "stop node 1"
+try_command_on_node 1 $CTDB stop
+
+wait_until_node_has_status 1 stopped
+
+# Add a record key=test1 data=newvalue1
+echo "store key(test1) data(newvalue1)"
+echo '"test1" "newvalue1"' | try_command_on_node -i 0 $CTDB ptrans "$TESTDB"
+
+# Continue node
+echo "contine node 1"
+try_command_on_node 1 $CTDB continue
+
+wait_until_node_has_status 1 notstopped
+
+}
+
+#
+# Main test
+#
+TESTDB="persistent_test.tdb"
+
+status=0
+
+# Create a temporary persistent database to test with
+echo "create persistent test database $TESTDB"
+try_command_on_node 0 $CTDB attach $TESTDB persistent
+
+do_test
+if try_command_on_node 0 $CTDB pfetch $TESTDB test1 ; then
+ echo "GOOD: Record was not deleted (recovery by sequence number worked)"
+else
+ echo "BAD: Record was deleted"
+ status=1
+fi
+
+exit $status
diff --git a/ctdb/tests/INTEGRATION/database/scripts/local.bash b/ctdb/tests/INTEGRATION/database/scripts/local.bash
new file mode 100644
index 0000000..ae2e0d5
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/scripts/local.bash
@@ -0,0 +1,116 @@
+# Hey Emacs, this is a -*- shell-script -*- !!! :-)
+
+check_cattdb_num_records ()
+{
+ local db="$1"
+ local num="$2"
+ local nodes="$3"
+
+ # $nodes has embedded newlines - put list on 1 line for printing
+ local t
+ t=$(echo "$nodes" | xargs)
+ echo "Confirm that ${db} has ${num} record(s) on node(s): ${t}"
+
+ local ret=0
+ local node
+ for node in $nodes ; do
+ local num_found
+
+ num_found=$(db_ctdb_cattdb_count_records "$node" "$db")
+ if [ "$num_found" = "$num" ] ; then
+ continue
+ fi
+
+ printf 'BAD: %s on node %d has %d record(s), expected %d\n' \
+ "$db" "$node" "$num_found" "$num"
+ ctdb_onnode -v "$node" "cattdb $db"
+ ret=1
+ done
+
+ return $ret
+}
+
+_key_dmaster_check ()
+{
+ local node="$1"
+ local db="$2"
+ local key="$3"
+ local dmaster="${4:-${node}}"
+
+ testprog_onnode "$node" "ctdb-db-test local-read ${db} ${key}"
+
+ # shellcheck disable=SC2154
+ # $outfile is set above by try_command_on_node()
+ grep -Fqx "dmaster: ${dmaster}" "$outfile"
+}
+
+_key_dmaster_fail ()
+{
+ local dmaster="$1"
+
+ echo "BAD: node ${dmaster} is not dmaster"
+ # shellcheck disable=SC2154
+ # $outfile is set by the caller via _key_dmaster_check()
+ cat "$outfile"
+ ctdb_test_fail
+}
+
+vacuum_test_key_dmaster ()
+{
+ local node="$1"
+ local db="$2"
+ local key="$3"
+ local dmaster="${4:-${node}}"
+
+ if ! _key_dmaster_check "$node" "$db" "$key" "$dmaster" ; then
+ _key_dmaster_fail "$dmaster"
+ fi
+}
+
+vacuum_test_wait_key_dmaster ()
+{
+ local node="$1"
+ local db="$2"
+ local key="$3"
+ local dmaster="${4:-${node}}"
+
+ if ! wait_until 30 \
+ _key_dmaster_check "$node" "$db" "$key" "$dmaster" ; then
+ _key_dmaster_fail "$dmaster"
+ fi
+}
+
+vacuum_confirm_key_empty_dmaster ()
+{
+ local node="$1"
+ local db="$2"
+ local key="$3"
+ local dmaster="${4:-${node}}"
+
+ echo "Confirm record key=\"${key}\" is empty and dmaster=${dmaster}"
+
+ vacuum_test_key_dmaster "$node" "$db" "$key" "$dmaster"
+
+ if ! grep -Fqx 'data(0) = ""' "$outfile" ; then
+ echo "BAD: record not empty"
+ cat "$outfile"
+ ctdb_test_fail
+ fi
+}
+
+db_confirm_key_has_value ()
+{
+ local node="$1"
+ local db="$2"
+ local key="$3"
+ local val="$4"
+
+ local out outv
+
+ ctdb_onnode "$node" "readkey ${db} ${key}"
+ outv=$(echo "$out" | sed -n 's|^Data: size:.* ptr:\[\(.*\)\]$|\1|p')
+ if [ "$val" != "$outv" ] ; then
+ ctdb_test_fail \
+ "BAD: value for \"${key}\"=\"${outv}\" (not \"${val}\")"
+ fi
+}
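
A minimal usage sketch for the helpers above, written as a hypothetical test fragment
("example.tdb" and the key/value are placeholders, not names used by any real test):

    . "${TEST_SCRIPTS_DIR}/integration.bash"

    ctdb_test_init
    ctdb_get_all_pnns

    ctdb_onnode 0 "attach example.tdb"
    ctdb_onnode 0 "writekey example.tdb key1 value1"
    ctdb_onnode all "readkey example.tdb key1"    # migrate the record to every node

    # every node should now hold exactly 1 record, with the expected value on node 0
    check_cattdb_num_records "example.tdb" 1 "$all_pnns"
    db_confirm_key_has_value 0 "example.tdb" key1 value1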
diff --git a/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh b/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
new file mode 100755
index 0000000..556e523
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/transaction.001.ptrans.sh
@@ -0,0 +1,110 @@
+#!/usr/bin/env bash
+
+# Verify that 'ctdb ptrans' works as expected
+#
+# Pipe some operations to 'ctdb ptrans' and validate the TDB contents
+# with ctdb catdb
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+TESTDB="ptrans_test.tdb"
+
+# Create a temporary persistent database to test with
+echo "create persistent test database $TESTDB"
+try_command_on_node 0 $CTDB attach $TESTDB persistent
+
+# Wipe Test database
+echo "wipe test database"
+try_command_on_node 0 $CTDB wipedb $TESTDB
+
+##########
+
+echo "Adding 3 records"
+
+items='
+"key1" "value1"
+"key2" "value1"
+"key3" "value1"'
+
+echo "$items" | try_command_on_node -i 0 $CTDB ptrans "$TESTDB"
+
+try_command_on_node 0 $CTDB catdb "$TESTDB"
+
+n=$(grep -c '^key.*= "key.*"' "$outfile" || true)
+
+if [ $n -ne 3 ] ; then
+ echo "BAD: expected 3 keys in..."
+ cat "$outfile"
+ exit 1
+else
+ echo "GOOD: 3 records were inserted"
+fi
+
+##########
+
+echo "Deleting 1 record, updating 1, adding 1 new record, 1 bogus input line"
+
+items='
+"key1" ""
+"key2" "value2"
+"key3"
+"key4" "value1"'
+
+echo "$items" | try_command_on_node -i 0 $CTDB ptrans "$TESTDB"
+
+try_command_on_node 0 $CTDB catdb "$TESTDB"
+
+n=$(grep -c '^key.*= "key.*"' "$outfile" || true)
+
+if [ $n -ne 3 ] ; then
+ echo "BAD: expected 3 keys in..."
+ cat "$outfile"
+ exit 1
+else
+ echo "GOOD: 3 records found"
+fi
+
+##########
+
+echo "Verifying records"
+
+while read key value ; do
+ try_command_on_node 0 $CTDB pfetch "$TESTDB" "$key"
+ if [ "$value" != "$out" ] ; then
+ echo "BAD: for key \"$key\" expected \"$value\" but got \"$out\""
+ exit 1
+ else
+ echo "GOOD: for key \"$key\" got \"$out\""
+ fi
+done <<EOF
+key2 value2
+key3 value1
+key4 value1
+EOF
+
+##########
+
+echo "Deleting all records"
+
+items='
+"key2" ""
+"key3" ""
+"key4" ""'
+
+echo "$items" | try_command_on_node -i 0 $CTDB ptrans "$TESTDB"
+
+try_command_on_node 0 $CTDB catdb "$TESTDB"
+
+n=$(grep -c '^key.*= "key.*"' "$outfile" || true)
+
+if [ $n -ne 0 ] ; then
+ echo "BAD: expected 0 keys in..."
+ cat "$outfile"
+ exit 1
+else
+ echo "GOOD: 0 records found"
+fi
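
For context, the grep patterns above key off the shape of 'ctdb catdb' output, which
prints a key line and a data line per record and ends with a record count, roughly
as follows (per-record header fields are omitted and exact byte counts depend on how
the key was stored):

    key(4) = "key2"
    data(6) = "value2"
    ...
    Dumped 3 records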
diff --git a/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh b/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh
new file mode 100755
index 0000000..d633c7c
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/transaction.002.loop.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Verify that the transaction_loop test succeeds
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+TESTDB="persistent_trans.tdb"
+
+try_command_on_node 0 "$CTDB attach $TESTDB persistent"
+try_command_on_node 0 "$CTDB wipedb $TESTDB"
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+if [ -z "$CTDB_TEST_TIMELIMIT" ] ; then
+ CTDB_TEST_TIMELIMIT=30
+fi
+
+t="$CTDB_TEST_WRAPPER $VALGRIND transaction_loop \
+ -n ${num_nodes} -t ${CTDB_TEST_TIMELIMIT} \
+ -D ${TESTDB} -T persistent -k testkey"
+
+echo "Running transaction_loop on all $num_nodes nodes."
+try_command_on_node -v -p all "$t"
diff --git a/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh b/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh
new file mode 100755
index 0000000..05aadba
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/transaction.003.loop_recovery.sh
@@ -0,0 +1,50 @@
+#!/usr/bin/env bash
+
+# Verify that the transaction_loop test succeeds with recoveries.
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+recovery_loop()
+{
+ local COUNT=1
+
+ while true ; do
+ echo Recovery $COUNT
+ try_command_on_node 0 $CTDB recover
+ sleep 2
+ COUNT=$((COUNT + 1))
+ done
+}
+
+recovery_loop_start()
+{
+ recovery_loop >/dev/null &
+ RECLOOP_PID=$!
+ ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1"
+}
+
+TESTDB="persistent_trans.tdb"
+
+try_command_on_node 0 "$CTDB attach $TESTDB persistent"
+try_command_on_node 0 "$CTDB wipedb $TESTDB"
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+if [ -z "$CTDB_TEST_TIMELIMIT" ] ; then
+ CTDB_TEST_TIMELIMIT=30
+fi
+
+t="$CTDB_TEST_WRAPPER $VALGRIND transaction_loop \
+ -n ${num_nodes} -t ${CTDB_TEST_TIMELIMIT} \
+ -D ${TESTDB} -T persistent -k testkey"
+
+echo "Starting recovery loop"
+recovery_loop_start
+
+echo "Running transaction_loop on all $num_nodes nodes."
+try_command_on_node -v -p all "$t"
diff --git a/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh b/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh
new file mode 100755
index 0000000..528303a
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/transaction.004.update_record.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+
+# Verify that "ctdb update_record_persistent" creates new records and
+# updates existing records in a persistent database
+#
+# 1. Create and wipe a persistent test database
+# 2. Do a recovery
+# 3. Confirm that the database is empty
+# 4. Create a new record using "ctdb update_record_persistent"
+# 5. Confirm the record exists in the database using "ctdb cattdb"
+# 6. Update the record's value using "ctdb update_record_persistent"
+# 7. Confirm that the original value no longer exists using "ctdb cattdb"
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+test_db="persistent_test.tdb"
+
+# create a temporary persistent database to test with
+echo "Create persistent test database \"$test_db\""
+try_command_on_node 0 $CTDB attach "$test_db" persistent
+
+
+# 3.
+echo "Wipe the persistent test database"
+try_command_on_node 0 $CTDB wipedb "$test_db"
+echo "Force a recovery"
+try_command_on_node 0 $CTDB recover
+
+# check that the database is wiped
+num_records=$(db_ctdb_cattdb_count_records 1 "$test_db")
+if [ $num_records = "0" ] ; then
+ echo "OK: database was wiped"
+else
+ echo "BAD: we did not end up with an empty database"
+ exit 1
+fi
+
+# 4.
+echo "Create a new record in the persistent database using UPDATE_RECORD"
+try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \
+ -D "$test_db" -k "Update_Record_Persistent" -v "FirstValue"
+
+try_command_on_node 0 "$CTDB cattdb "$test_db" | grep 'FirstValue' | wc -l"
+if [ "$out" = 1 ] ; then
+ echo "GOOD: we did not find the record after the create/update"
+else
+ echo "BAD: we did find the record after the create/update"
+ exit 1
+fi
+
+# 5.
+echo "Modify an existing record in the persistent database using UPDATE_RECORD"
+try_command_on_node 0 $CTDB_TEST_WRAPPER $VALGRIND update_record_persistent \
+ -D "$test_db" -k "Update_Record_Persistent" -v "SecondValue"
+
+try_command_on_node 0 "$CTDB cattdb "$test_db" | grep 'FirstValue' | wc -l"
+if [ "$out" = 0 ] ; then
+ echo "GOOD: did not find old record after the modify/update"
+else
+ echo "BAD: we still found the old record after the modify/update"
+ exit 1
+fi
+
+try_command_on_node 0 "$CTDB cattdb "$test_db" | grep 'SecondValue' | wc -l"
+if [ "$out" = 1 ] ; then
+ echo "GOOD: found the record after the modify/update"
+else
+ echo "BAD: could not find the record after the modify/update"
+ exit 1
+fi
+
+echo "Wipe the persistent test databases and clean up"
+try_command_on_node 0 $CTDB wipedb "$test_db"
diff --git a/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh b/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh
new file mode 100755
index 0000000..9de6c34
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/transaction.010.loop_recovery.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+
+# Verify that the transaction_loop test succeeds with recoveries for
+# replicated databases
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+recovery_loop()
+{
+ local COUNT=1
+
+ while true ; do
+ echo Recovery $COUNT
+ try_command_on_node 0 $CTDB recover
+ sleep 2
+ COUNT=$((COUNT + 1))
+ done
+}
+
+recovery_loop_start()
+{
+ recovery_loop >/dev/null &
+ RECLOOP_PID=$!
+ ctdb_test_exit_hook_add "kill $RECLOOP_PID >/dev/null 2>&1"
+}
+
+TESTDB="replicated_trans.tdb"
+
+try_command_on_node 0 "$CTDB attach $TESTDB replicated"
+try_command_on_node 0 "$CTDB wipedb $TESTDB"
+
+try_command_on_node 0 "$CTDB listnodes | wc -l"
+num_nodes="$out"
+
+if [ -z "$CTDB_TEST_TIMELIMIT" ] ; then
+ CTDB_TEST_TIMELIMIT=30
+fi
+
+t="$CTDB_TEST_WRAPPER $VALGRIND transaction_loop \
+ -n ${num_nodes} -t ${CTDB_TEST_TIMELIMIT} \
+ -D ${TESTDB} -T replicated -k testkey"
+
+echo "Starting recovery loop"
+recovery_loop_start
+
+echo "Running transaction_loop on all $num_nodes nodes."
+try_command_on_node -v -p all "$t"
diff --git a/ctdb/tests/INTEGRATION/database/traverse.001.one.sh b/ctdb/tests/INTEGRATION/database/traverse.001.one.sh
new file mode 100755
index 0000000..1b3b7c2
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/traverse.001.one.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# Confirm that traverses of volatile databases work as expected
+
+# This is a very simple example. It writes a single record, updates it
+# on another node and then confirms that the correct value is found when
+# traversing. It then repeats this after removing the LMASTER role from
+# the node where the value is updated.
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+#
+# Main test
+#
+TESTDB="traverse_db.tdb"
+
+echo "create volatile test database $TESTDB"
+try_command_on_node 0 $CTDB attach "$TESTDB"
+
+echo "wipe test database $TESTDB"
+try_command_on_node 0 $CTDB wipedb "$TESTDB"
+
+echo "write foo=bar0 on node 0"
+try_command_on_node 0 $CTDB writekey "$TESTDB" "foo" "bar0"
+
+echo "write foo=bar1 on node 1"
+try_command_on_node 1 $CTDB writekey "$TESTDB" "foo" "bar1"
+
+echo
+
+check_db_num_records ()
+{
+ local node="$1"
+ local db="$2"
+ local n="$3"
+
+ echo "Checking on node ${node} to ensure ${db} has ${n} records..."
+ try_command_on_node "$node" "${CTDB} catdb ${db}"
+
+ num=$(sed -n -e 's|^Dumped \(.*\) records$|\1|p' "$outfile")
+ if [ "$num" = "$n" ] ; then
+ echo "OK: Number of records=${num}"
+ echo
+ else
+ echo "BAD: There were ${num} (!= ${n}) records"
+ cat "$outfile"
+ exit 1
+ fi
+}
+
+check_db_num_records 0 "$TESTDB" 1
+check_db_num_records 1 "$TESTDB" 1
+
+cat <<EOF
+
+Again, this time with 10 records, rewriting 5 of them on the 2nd node
+
+EOF
+
+echo "wipe test database $TESTDB"
+try_command_on_node 0 $CTDB wipedb "$TESTDB"
+
+for i in $(seq 0 9) ; do
+ k="foo${i}"
+ v="bar${i}@0"
+ echo "write ${k}=${v} on node 0"
+ try_command_on_node 0 "${CTDB} writekey ${TESTDB} ${k} ${v}"
+done
+
+for i in $(seq 1 5) ; do
+ k="foo${i}"
+ v="bar${i}@1"
+ echo "write ${k}=${v} on node 1"
+ try_command_on_node 1 "${CTDB} writekey ${TESTDB} ${k} ${v}"
+done
+
+check_db_num_records 0 "$TESTDB" 10
+check_db_num_records 1 "$TESTDB" 10
+
+cat <<EOF
+
+Again, this time with lmaster role off on node 1
+
+EOF
+
+echo "wipe test database $TESTDB"
+try_command_on_node 0 $CTDB wipedb "$TESTDB"
+
+echo "switching off lmaster role on node 1"
+try_command_on_node 1 $CTDB setlmasterrole off
+
+try_command_on_node -v 1 $CTDB getcapabilities
+
+wait_until_node_has_status 1 notlmaster 10 0
+
+echo "write foo=bar0 on node 0"
+try_command_on_node 0 $CTDB writekey "$TESTDB" "foo" "bar0"
+
+echo "write foo=bar1 on node 1"
+try_command_on_node 1 $CTDB writekey "$TESTDB" "foo" "bar1"
+
+echo
+
+check_db_num_records 0 "$TESTDB" 1
+check_db_num_records 1 "$TESTDB" 1
+
+if grep -q "^data(4) = \"bar1\"\$" "$outfile" ; then
+ echo "OK: Data from node 1 was returned"
+else
+ echo "BAD: Data from node 1 was not returned"
+ exit 1
+fi
diff --git a/ctdb/tests/INTEGRATION/database/traverse.002.many.sh b/ctdb/tests/INTEGRATION/database/traverse.002.many.sh
new file mode 100755
index 0000000..fb0dc98
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/traverse.002.many.sh
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# Test cluster wide traverse code
+#
+# 1. Create a volatile test database
+# 2. Add records on different nodes
+# 3. Use "ctdb catdb" to confirm that all added records are present
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+try_command_on_node 0 "$CTDB listnodes"
+num_nodes=$(echo "$out" | wc -l)
+
+num_records=1000
+
+TESTDB="traverse_test.tdb"
+
+echo "create test database $TESTDB"
+try_command_on_node 0 $CTDB attach $TESTDB
+
+echo "wipe test database $TESTDB"
+try_command_on_node 0 $CTDB wipedb $TESTDB
+
+echo "Add $num_records records to database"
+i=0
+while [ $i -lt $num_records ]; do
+ key=$(printf "key-%04x" $i)
+ value="value-$i"
+
+ n=$(( i % num_nodes ))
+ try_command_on_node $n $CTDB writekey $TESTDB $key $value
+
+ i=$(( i + 1 ))
+done
+
+echo "Start a traverse and collect records"
+try_command_on_node 0 $CTDB catdb $TESTDB
+
+num_read=$(tail -n 1 "$outfile" | cut -d' ' -f2)
+if [ $num_read -eq $num_records ]; then
+ echo "GOOD: All $num_records records retrieved"
+ status=0
+else
+ echo "BAD: Only $num_read/$num_records records retrieved"
+ status=1
+fi
+
+exit $status
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
new file mode 100755
index 0000000..27a2225
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.001.fast.sh
@@ -0,0 +1,159 @@
+#!/usr/bin/env bash
+
+# Ensure that vacuuming deletes records on all nodes
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+vacuum_test ()
+{
+ local db="$1"
+ local num_records="$2"
+ local delete_from_lmaster="${3:-false}"
+
+ local t
+ if "$delete_from_lmaster" ; then
+ t="lmaster"
+ else
+ t="non-lmaster"
+ fi
+
+ echo
+ echo '............................................................'
+ printf 'Creating %d record(s)\n' "$num_records"
+ printf 'Testing vacuuming of 1 record deleted from %s\n' "$t"
+ echo '............................................................'
+
+ echo
+ echo "Stall vacuuming on all nodes"
+ ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+ echo
+ echo "Getting list of nodes..."
+ local all_pnns
+ ctdb_get_all_pnns
+
+ local first
+ first=$(echo "$all_pnns" | sed -n -e '1p')
+
+ echo
+ echo "Create/wipe test database ${db}"
+ ctdb_onnode "$first" "attach ${db}"
+ ctdb_onnode "$first" "wipedb ${db}"
+
+ echo
+ echo "Write ${num_records} records to ${db}"
+ local i
+ for i in $(seq 1 "$num_records") ; do
+ ctdb_onnode "$first" "writekey ${db} test${i} value${i}"
+ done
+
+ echo
+ echo "Migrate record(s) to all nodes"
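+	# Reading the key on a node migrates the record to that node, so
+	# after this loop every node holds a local copy of each record.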
+ for i in $(seq 1 "$num_records") ; do
+ ctdb_onnode all "readkey ${db} test${i}"
+ done
+
+ echo
+ echo "Confirm that all nodes have all the records"
+ check_cattdb_num_records "$db" "$num_records" "$all_pnns"
+
+ local key="test1"
+ echo
+ echo "Delete key ${key}"
+
+ echo " Find lmaster for key \"${key}\""
+ testprog_onnode "$first" "ctdb-db-test get-lmaster ${key}"
+	# $out is set above by testprog_onnode()
+	# shellcheck disable=SC2154
+ lmaster="$out"
+ echo " lmaster=${lmaster}"
+
+ if "$delete_from_lmaster" ; then
+ echo " Delete key ${key} on lmaster node ${lmaster}"
+ dnode="$lmaster"
+ else
+ for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ dnode="$i"
+ break
+ fi
+ done
+ echo " Delete key ${key} on non-lmaster node ${dnode}"
+ fi
+ ctdb_onnode "$dnode" "deletekey ${db} ${key}"
+
+ echo
+ vacuum_confirm_key_empty_dmaster "$dnode" "$db" "$key"
+
+ echo
+ echo "Confirm all records still exist on all nodes"
+ check_cattdb_num_records "$db" "$num_records" "$all_pnns"
+
+ if ! "$delete_from_lmaster" ; then
+ # Ask the lmaster to fetch the deleted record
+ echo
+ echo "Vacuum on non-lmaster node ${dnode}"
+ testprog_onnode "$dnode" "ctdb-db-test vacuum ${db}"
+
+ echo
+ vacuum_confirm_key_empty_dmaster "$dnode" "$db" "$key"
+
+ # Fetch the record and put it in the delete queue in
+ # the main daemon for processing in next vacuuming run
+ # on the lmaster
+ echo
+ echo "Vacuum on lmaster node ${lmaster}"
+ testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+ echo
+		echo "Confirm all records still exist on all nodes"
+ check_cattdb_num_records "$db" "$num_records" "$all_pnns"
+
+ echo
+ vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"
+ fi
+
+ echo
+ # In the delete-from-lmaster case, the record is already in
+ # the lmaster's delete-queue so only a single run is needed
+ echo "Vacuum on lmaster node ${lmaster}"
+ testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+ echo
+ echo "Confirm a record has been deleted on all nodes"
+ local n=$((num_records - 1))
+ check_cattdb_num_records "$db" "$n" "$all_pnns"
+
+ echo
+ echo "Confirm all other records still exist with expected values"
+ local i
+ for i in $(seq 1 "$num_records") ; do
+ local k="test${i}"
+ local v="value${i}"
+
+ if [ "$k" = "$key" ] ; then
+ continue
+ fi
+
+ db_confirm_key_has_value "$first" "$db" "$k" "$v"
+ done
+ echo "GOOD"
+}
+
+testdb="vacuum_test.tdb"
+
+# 1 record, delete from non-lmaster
+vacuum_test "$testdb" 1 false
+
+# 10 records, delete from non-lmaster
+vacuum_test "$testdb" 10 false
+
+# 1 record, delete from lmaster
+vacuum_test "$testdb" 1 true
+
+# 10 records, delete from lmaster
+vacuum_test "$testdb" 10 true
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
new file mode 100755
index 0000000..0dc8372
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.002.full.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+# Ensure a full vacuuming run deletes records
+
+# Create some records, delete some of them on their lmaster (with a
+# test tool that doesn't do SCHEDULE_FOR_DELETION), run some fast
+# vacuuming runs (to ensure they don't delete records that haven't
+# been added to the delete queue) and then try a full vacuuming run,
+# which will actually do a traverse of the database to find empty
+# records and delete them. Confirm that records that haven't been
+# deleted are still there, with expected values.
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+
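+# Bump VacuumInterval so the daemons never start vacuuming runs of
+# their own; every run in this test is triggered explicitly via
+# ctdb-db-test, keeping the sequence deterministic.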
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo
+echo "Create records in ${db}"
+for i in $(seq 1 10) ; do
+ ctdb_onnode "$first" "writekey ${db} delete${i} value${i}"
+ ctdb_onnode "$first" "writekey ${db} keep${i} value${i}"
+done
+
+echo
+echo "Migrate record(s) to all nodes"
+for i in $(seq 1 10) ; do
+ ctdb_onnode all "readkey ${db} delete${i}"
+ ctdb_onnode all "readkey ${db} keep${i}"
+done
+
+echo
+echo "Confirm that all nodes have all the records"
+check_cattdb_num_records "$db" 20 "$all_pnns"
+
+echo
+echo "Delete all 10 records from their lmaster node"
+for i in $(seq 1 10) ; do
+ key="delete${i}"
+
+ testprog_onnode "$first" "ctdb-db-test get-lmaster ${key}"
+ # $out is set above by testprog_onnode()
+ # shellcheck disable=SC2154
+ lmaster="$out"
+
+ echo
+ echo "Delete ${key} from lmaster node ${lmaster}"
+ testprog_onnode "$lmaster" \
+ "ctdb-db-test fetch-local-delete $db ${key}"
+
+ vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"
+done
+
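+# fetch-local-delete emptied the records without adding them to the
+# delete queue (no SCHEDULE_FOR_DELETION), so these fast runs must not
+# remove them; only the full run below, which traverses the database,
+# should.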
+echo "Do fast vacuuming run on all nodes"
+testprog_onnode "all" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm all records still exist on all nodes"
+check_cattdb_num_records "$db" 20 "$all_pnns"
+
+echo
+echo "Do full vacuuming run on all nodes"
+testprog_onnode "all" "ctdb-db-test vacuum ${db} full"
+
+echo
+echo "Confirm 10 records exist on all nodes"
+check_cattdb_num_records "$db" 10 "$all_pnns"
+
+echo
+echo "Confirm that remaining records still exist with expected values"
+for i in $(seq 1 10) ; do
+ k="keep${i}"
+ v="value${i}"
+
+ db_confirm_key_has_value "$first" "$db" "$k" "$v"
+done
+echo "GOOD"
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
new file mode 100755
index 0000000..acb7b13
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.003.recreate.sh
@@ -0,0 +1,139 @@
+#!/usr/bin/env bash
+
+# Ensure that vacuuming does not delete a record that is recreated
+# before vacuuming completes. This needs at least 3 nodes.
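+#
+# The record is recreated at different points in the fast vacuuming
+# sequence (before any run, after the run on the deleting node, after
+# the 1st lmaster run) and on lmaster, deleting and third nodes; in
+# every case the recreated value must survive on all nodes.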
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo
+echo "Create a record in ${db}"
+ctdb_onnode "$first" "writekey ${db} key value1"
+
+echo
+echo "Migrate record to all nodes"
+ctdb_onnode all "readkey ${db} key"
+
+echo
+echo "Confirm that all nodes have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+echo "Determine lmaster node for key"
+testprog_onnode "$first" "ctdb-db-test get-lmaster key"
+# $out is set above by testprog_onnode()
+# shellcheck disable=SC2154
+lmaster="$out"
+echo "lmaster=${lmaster}"
+
+non_lmaster=""
+# Find a non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$non_lmaster" ] ; then
+ ctdb_test_fail "Could not find non-lmaster node for key"
+fi
+
+another_non_lmaster=""
+# Find another non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] && [ "$i" != "$non_lmaster" ] ; then
+ another_non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$another_non_lmaster" ] ; then
+ ctdb_test_fail "Could not find another non-lmaster node for key"
+fi
+
+vacuum_test ()
+{
+ local db="$1"
+ local key="$2"
+ local val="$3"
+ local dnode="$4"
+ local rnode="$5"
+ local rrun="$6"
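+
+	# dnode: node the record is deleted on
+	# rnode: node the record is recreated on
+	# rrun:  number of fast vacuuming runs done before the recreate
+	#        (0 = before any run, 1 = after the run on dnode,
+	#         2 = after the 1st lmaster run)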
+
+ echo
+ echo '............................................................'
+ printf 'Delete key %s on node %d\n' "$key" "$dnode"
+ printf 'Recreate on node %d after %d vacuuming run(s)\n' \
+ "$rnode" "$rrun"
+ echo '............................................................'
+
+ echo
+ echo "Delete key \"${key}\" from node ${dnode}"
+ ctdb_onnode "$dnode" "deletekey ${db} ${key}"
+
+ if [ "$rrun" -eq 0 ] ; then
+ echo "Recreate record on node ${rnode}"
+ ctdb_onnode "$rnode" "writekey ${db} ${key} ${val}"
+ fi
+
+ echo "Do a fast vacuuming run on node ${dnode}"
+ testprog_onnode "$dnode" "ctdb-db-test vacuum ${db}"
+
+ if [ "$rrun" -eq 1 ] ; then
+ echo "Recreate record on node ${rnode}"
+ ctdb_onnode "$rnode" "writekey ${db} ${key} ${val}"
+ fi
+
+ echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+ testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+ if [ "$rrun" -eq 2 ] ; then
+ echo "Recreate record on node ${rnode}"
+ ctdb_onnode "$rnode" "writekey ${db} ${key} ${val}"
+ fi
+
+ echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+ testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+ echo
+ echo "Confirm the record still exists on all nodes"
+ check_cattdb_num_records "$db" 1 "$all_pnns"
+
+ echo
+ echo "Confirm the record contains correct value"
+ db_confirm_key_has_value "$first" "$db" "$key" "$val"
+}
+
+vacuum_test "$db" "key" "value01" "$non_lmaster" "$non_lmaster" 0
+vacuum_test "$db" "key" "value02" "$non_lmaster" "$another_non_lmaster" 0
+vacuum_test "$db" "key" "value03" "$non_lmaster" "$lmaster" 0
+vacuum_test "$db" "key" "value04" "$lmaster" "$non_lmaster" 0
+vacuum_test "$db" "key" "value05" "$lmaster" "$lmaster" 0
+
+vacuum_test "$db" "key" "value06" "$non_lmaster" "$non_lmaster" 1
+vacuum_test "$db" "key" "value07" "$non_lmaster" "$lmaster" 1
+vacuum_test "$db" "key" "value08" "$non_lmaster" "$another_non_lmaster" 1
+
+vacuum_test "$db" "key" "value09" "$non_lmaster" "$non_lmaster" 2
+vacuum_test "$db" "key" "value10" "$non_lmaster" "$lmaster" 2
+vacuum_test "$db" "key" "value11" "$non_lmaster" "$another_non_lmaster" 2
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
new file mode 100755
index 0000000..3862526
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.030.locked.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+
+# Confirm that a record is not vacuumed if it is locked on the
+# deleting node when the 1st fast vacuuming run occurs there, but is
+# instead dropped from the delete queue
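+#
+# The 1st fast run on the deleting node is what normally passes the
+# emptied record on to the lmaster.  With the record locked, that run
+# cannot process it and the entry is dropped from the delete queue, so
+# the empty record survives on all nodes.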
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+key="key"
+
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Determine lmaster node for key"
+testprog_onnode "$first" "ctdb-db-test get-lmaster key"
+# $out is set above by testprog_onnode()
+# shellcheck disable=SC2154
+lmaster="$out"
+echo "lmaster=${lmaster}"
+
+non_lmaster=""
+# Find a non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$non_lmaster" ] ; then
+ ctdb_test_fail "Could not find non-lmaster node for key"
+fi
+
+echo "............................................................"
+echo "Delete key ${key} on non-lmaster node ${non_lmaster}"
+echo "Lock on node ${non_lmaster} during 1st vacuuming run"
+echo "............................................................"
+
+echo
+
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo "Create a record in ${db}"
+ctdb_onnode "$first" "writekey ${db} ${key} value1"
+
+echo "Migrate record to all nodes"
+ctdb_onnode all "readkey ${db} ${key}"
+
+echo "Confirm that all nodes have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+
+echo "Delete key \"${key}\" from node ${non_lmaster}"
+ctdb_onnode "$non_lmaster" "deletekey $db ${key}"
+
+echo "Lock record on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test local-lock ${db} ${key}"
+pid="${out#OK }"
+ctdb_test_cleanup_pid_set "$non_lmaster" "$pid"
+
+echo "Do a fast vacuuming run on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Kill lock process ${pid} on node ${non_lmaster}"
+try_command_on_node "$non_lmaster" "kill ${pid}"
+ctdb_test_cleanup_pid_clear
+
+echo
+
+# If the record is still in the delete queue then this will process it
+echo "Do a fast vacuuming run on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+
+echo "Confirm the record still exists on all nodes"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+vacuum_confirm_key_empty_dmaster "$non_lmaster" "$db" "$key"
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
new file mode 100755
index 0000000..d16482e
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.031.locked.sh
@@ -0,0 +1,114 @@
+#!/usr/bin/env bash
+
+# Confirm that a record is vacuumed if it is locked on the deleting
+# node when the 2nd fast vacuuming run occurs, but vacuuming is
+# delayed until the lock is released
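+#
+# The 2nd run (on the lmaster) has to fetch the deleted record from
+# the deleting node.  While the record is locked there that run fails
+# (it is expected to time out), but vacuuming is only delayed: once
+# the lock is released the record migrates to the lmaster and a
+# further fast run deletes it from all nodes.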
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+key="key"
+
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Determine lmaster node for key"
+testprog_onnode "$first" "ctdb-db-test get-lmaster key"
+# $out is set above by testprog_onnode()
+# shellcheck disable=SC2154
+lmaster="$out"
+echo "lmaster=${lmaster}"
+
+non_lmaster=""
+# Find a non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$non_lmaster" ] ; then
+ ctdb_test_fail "Could not find non-lmaster node for key"
+fi
+
+echo "............................................................"
+echo "Delete key ${key} on node ${non_lmaster}"
+echo "Lock on non-lmaster node ${non_lmaster} during 2nd vacuuming run"
+echo "............................................................"
+
+echo
+
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo "Create a record in ${db}"
+ctdb_onnode "$first" "writekey ${db} ${key} value1"
+
+echo "Migrate record to all nodes"
+ctdb_onnode all "readkey ${db} ${key}"
+
+echo "Confirm that all nodes have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+
+echo "Delete key \"${key}\" from node ${non_lmaster}"
+ctdb_onnode "$non_lmaster" "deletekey $db ${key}"
+
+echo
+echo "Do a fast vacuuming run on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that all nodes still have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+echo "Lock record on non-lmaster node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test local-lock ${db} ${key}"
+pid="${out#OK }"
+ctdb_test_cleanup_pid_set "$non_lmaster" "$pid"
+
+echo
+echo "Do a fast vacuuming run on lmaster node ${lmaster} - THIS WILL FAIL"
+status=0
+testprog_onnode "$lmaster" "ctdb-db-test -t 10 vacuum ${db}" || status=$?
+
+if [ $status -ne 110 ] ; then
+ ctdb_test_fail "$out"
+fi
+
+echo "Confirm record key=\"${key}\" has dmaster=${non_lmaster}"
+vacuum_test_key_dmaster "$lmaster" "$db" "$key" "$non_lmaster"
+
+echo "Kill lock process ${pid} on node ${non_lmaster}"
+try_command_on_node "$non_lmaster" "kill ${pid}"
+ctdb_test_cleanup_pid_clear
+
+echo "Wait until record is migrated to lmaster node ${lmaster}"
+vacuum_test_wait_key_dmaster "$lmaster" "$db" "$key"
+
+echo
+echo "Confirm that all nodes still have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo "Do a fast vacuuming run on node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that the record is gone from all nodes"
+check_cattdb_num_records "$db" 0 "$all_pnns"
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
new file mode 100755
index 0000000..481d1d4
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.032.locked.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+
+# Confirm that a record is not vacuumed if it is locked on the lmaster
+# when the 3rd fast vacuuming run occurs, but is dropped from the
+# lmaster delete queue
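+#
+# By the 3rd run the emptied record has reached the lmaster's delete
+# queue.  With the record locked on the lmaster, that run cannot
+# process it and the entry is dropped, so the empty record survives on
+# all nodes.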
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+key="key"
+
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Determine lmaster node for key"
+testprog_onnode "$first" "ctdb-db-test get-lmaster key"
+# $out is set above by testprog_onnode()
+# shellcheck disable=SC2154
+lmaster="$out"
+echo "lmaster=${lmaster}"
+
+non_lmaster=""
+# Find a non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$non_lmaster" ] ; then
+ ctdb_test_fail "Could not find non-lmaster node for key"
+fi
+
+echo "............................................................"
+echo "Delete key ${key} on node ${non_lmaster}"
+echo "Lock on lmaster node ${lmaster} during 3rd vacuuming run"
+echo "............................................................"
+
+echo
+
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo "Create a record in ${db}"
+ctdb_onnode "$first" "writekey ${db} ${key} value1"
+
+echo "Migrate record to all nodes"
+ctdb_onnode all "readkey ${db} ${key}"
+
+echo "Confirm that all nodes have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+
+echo "Delete key \"${key}\" from node ${non_lmaster}"
+ctdb_onnode "$non_lmaster" "deletekey $db ${key}"
+
+echo "Do a fast vacuuming run on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Lock record on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test local-lock ${db} ${key}"
+pid="${out#OK }"
+ctdb_test_cleanup_pid_set "$lmaster" "$pid"
+
+echo "Do a fast vacuuming run on node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Kill lock process ${pid} on node ${lmaster}"
+try_command_on_node "$lmaster" "kill ${pid}"
+ctdb_test_cleanup_pid_clear
+
+echo
+
+# If the record is still in the delete queue then this will process it
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+
+echo "Confirm the record still exists on all nodes"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
new file mode 100755
index 0000000..63d7d1f
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.033.locked.sh
@@ -0,0 +1,117 @@
+#!/usr/bin/env bash
+
+# Confirm that a record is not vacuumed if it is locked on the
+# deleting node when the 3rd fast vacuuming run occurs, but is dropped
+# from the lmaster delete list
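+#
+# The 3rd run (on the lmaster) is the one that removes the empty
+# copies from the other nodes.  With the record locked on the deleting
+# node that removal fails there, the record is dropped from the
+# lmaster's delete list, and the empty record survives on the lmaster
+# and the deleting node.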
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+key="key"
+
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Determine lmaster node for key"
+testprog_onnode "$first" "ctdb-db-test get-lmaster key"
+# $out is set above by testprog_onnode()
+# shellcheck disable=SC2154
+lmaster="$out"
+echo "lmaster=${lmaster}"
+
+non_lmaster=""
+# Find a non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$non_lmaster" ] ; then
+ ctdb_test_fail "Could not find non-lmaster node for key"
+fi
+
+echo "............................................................"
+echo "Delete key ${key} on node ${non_lmaster}"
+echo "Lock on non-lmaster node ${non_lmaster} during 3rd vacuuming run"
+echo "............................................................"
+
+echo
+
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo "Create a record in ${db}"
+ctdb_onnode "$first" "writekey ${db} ${key} value1"
+
+echo "Migrate record to all nodes"
+ctdb_onnode all "readkey ${db} ${key}"
+
+echo "Confirm that all nodes have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+
+echo "Delete key \"${key}\" from node ${non_lmaster}"
+ctdb_onnode "$non_lmaster" "deletekey $db ${key}"
+
+echo
+echo "Do a fast vacuuming run on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that all nodes still have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that all nodes still have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+echo "Lock record on non-lmaster node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test local-lock ${db} ${key}"
+pid="${out#OK }"
+ctdb_test_cleanup_pid_set "$non_lmaster" "$pid"
+
+echo "Do a fast vacuuming run on node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Kill lock process ${pid} on node ${non_lmaster}"
+try_command_on_node "$non_lmaster" "kill ${pid}"
+ctdb_test_cleanup_pid_clear
+
+echo
+echo "Confirm that nodes ${lmaster} and ${non_lmaster} still have the record"
+check_cattdb_num_records "$db" 1 "${lmaster} ${non_lmaster}"
+
+vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"
+
+echo
+
+# Record has been dropped from the delete list so this will not pick it up
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that nodes ${lmaster} and ${non_lmaster} still have the record"
+check_cattdb_num_records "$db" 1 "${lmaster} ${non_lmaster}"
+
+vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"
diff --git a/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh b/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
new file mode 100755
index 0000000..7f37ada
--- /dev/null
+++ b/ctdb/tests/INTEGRATION/database/vacuum.034.locked.sh
@@ -0,0 +1,129 @@
+#!/usr/bin/env bash
+
+# Confirm that a record is not vacuumed if it is locked on another
+# (non-lmaster, non-deleting) node when the 3rd fast vacuuming run
+# occurs, but is dropped from the lmaster delete tree
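+#
+# As in the previous test, but the lock is held on a third node
+# (neither the lmaster nor the deleting node), so it is the lmaster
+# and that third node that keep the empty record.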
+
+. "${TEST_SCRIPTS_DIR}/integration.bash"
+
+set -e
+
+ctdb_test_init
+
+db="vacuum_test.tdb"
+key="key"
+
+echo "Stall vacuuming on all nodes"
+ctdb_onnode -p all "setvar VacuumInterval 99999"
+
+echo
+echo "Getting list of nodes..."
+ctdb_get_all_pnns
+
+# all_pnns is set above by ctdb_get_all_pnns()
+# shellcheck disable=SC2154
+first=$(echo "$all_pnns" | sed -n -e '1p')
+
+echo
+echo "Determine lmaster node for key"
+testprog_onnode "$first" "ctdb-db-test get-lmaster key"
+# $out is set above by testprog_onnode()
+# shellcheck disable=SC2154
+lmaster="$out"
+echo "lmaster=${lmaster}"
+
+non_lmaster=""
+# Find a non-lmaster node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] ; then
+ non_lmaster="$i"
+ break
+ fi
+done
+if [ -z "$non_lmaster" ] ; then
+ ctdb_test_fail "Could not find non-lmaster node for key"
+fi
+
+another_node=""
+# Find another node
+for i in $all_pnns ; do
+ if [ "$i" != "$lmaster" ] && [ "$i" != "$non_lmaster" ] ; then
+ another_node="$i"
+ break
+ fi
+done
+if [ -z "$another_node" ] ; then
+ ctdb_test_fail "Could not find another non-lmaster node for key"
+fi
+
+echo "............................................................"
+echo "Delete key ${key} on node ${non_lmaster}"
+echo "Lock on non-lmaster node ${non_lmaster} during 3rd vacuuming run"
+echo "............................................................"
+
+echo
+
+echo "Create/wipe test database ${db}"
+ctdb_onnode "$first" "attach ${db}"
+ctdb_onnode "$first" "wipedb ${db}"
+
+echo "Create a record in ${db}"
+ctdb_onnode "$first" "writekey ${db} ${key} value1"
+
+echo "Migrate record to all nodes"
+ctdb_onnode all "readkey ${db} ${key}"
+
+echo "Confirm that all nodes have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+
+echo "Delete key \"${key}\" from node ${non_lmaster}"
+ctdb_onnode "$non_lmaster" "deletekey $db ${key}"
+
+echo
+echo "Do a fast vacuuming run on node ${non_lmaster}"
+testprog_onnode "$non_lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that all nodes still have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that all nodes still have the record"
+check_cattdb_num_records "$db" 1 "$all_pnns"
+
+echo
+echo "Lock record on non-lmaster node ${another_node}"
+testprog_onnode "$another_node" "ctdb-db-test local-lock ${db} ${key}"
+pid="${out#OK }"
+ctdb_test_cleanup_pid_set "$another_node" "$pid"
+
+echo "Do a fast vacuuming run on node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo "Kill lock process ${pid} on node ${another_node}"
+try_command_on_node "$another_node" "kill ${pid}"
+ctdb_test_cleanup_pid_clear
+
+echo
+echo "Confirm that nodes ${lmaster} and ${another_node} still have the record"
+check_cattdb_num_records "$db" 1 "${lmaster} ${another_node}"
+
+vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"
+
+echo
+
+# Record has been dropped from the delete list so this will not pick it up
+echo "Do a fast vacuuming run on lmaster node ${lmaster}"
+testprog_onnode "$lmaster" "ctdb-db-test vacuum ${db}"
+
+echo
+echo "Confirm that nodes ${lmaster} and ${another_node} still have the record"
+check_cattdb_num_records "$db" 1 "${lmaster} ${another_node}"
+
+vacuum_confirm_key_empty_dmaster "$lmaster" "$db" "$key"