Diffstat (limited to 'tests/unit/cluster')
-rw-r--r--  tests/unit/cluster/announced-endpoints.tcl        42
-rw-r--r--  tests/unit/cluster/cli.tcl                        416
-rw-r--r--  tests/unit/cluster/cluster-response-tls.tcl       110
-rw-r--r--  tests/unit/cluster/hostnames.tcl                  203
-rw-r--r--  tests/unit/cluster/human-announced-nodename.tcl    29
-rw-r--r--  tests/unit/cluster/links.tcl                      292
-rw-r--r--  tests/unit/cluster/misc.tcl                        26
-rw-r--r--  tests/unit/cluster/multi-slot-operations.tcl      109
-rw-r--r--  tests/unit/cluster/scripting.tcl                   70
-rw-r--r--  tests/unit/cluster/slot-ownership.tcl              61
10 files changed, 1358 insertions(+), 0 deletions(-)
diff --git a/tests/unit/cluster/announced-endpoints.tcl b/tests/unit/cluster/announced-endpoints.tcl
new file mode 100644
index 0000000..941a8e0
--- /dev/null
+++ b/tests/unit/cluster/announced-endpoints.tcl
@@ -0,0 +1,42 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+
+ test "Test change cluster-announce-port and cluster-announce-tls-port at runtime" {
+ set baseport [lindex [R 0 config get port] 1]
+ set count [expr {[llength $::servers] + 1}]
+ set used_port [find_available_port $baseport $count]
+
+ R 0 config set cluster-announce-tls-port $used_port
+ R 0 config set cluster-announce-port $used_port
+
+ assert_match "*:$used_port@*" [R 0 CLUSTER NODES]
+ wait_for_condition 50 100 {
+ [string match "*:$used_port@*" [R 1 CLUSTER NODES]]
+ } else {
+ fail "Cluster announced port was not propagated via gossip"
+ }
+
+ R 0 config set cluster-announce-tls-port 0
+ R 0 config set cluster-announce-port 0
+ assert_match "*:$baseport@*" [R 0 CLUSTER NODES]
+ }
+
+ test "Test change cluster-announce-bus-port at runtime" {
+ set baseport [lindex [R 0 config get port] 1]
+ set count [expr {[llength $::servers] + 1}]
+ set used_port [find_available_port $baseport $count]
+
+ # Verify config set cluster-announce-bus-port
+ R 0 config set cluster-announce-bus-port $used_port
+ assert_match "*@$used_port *" [R 0 CLUSTER NODES]
+ wait_for_condition 50 100 {
+ [string match "*@$used_port *" [R 1 CLUSTER NODES]]
+ } else {
+ fail "Cluster announced port was not propagated via gossip"
+ }
+
+ # Verify restoring the default cluster-announce-bus-port
+ set base_bus_port [expr $baseport + 10000]
+ R 0 config set cluster-announce-bus-port 0
+ assert_match "*@$base_bus_port *" [R 0 CLUSTER NODES]
+ }
+}
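
All of these tests lean on the suite's wait_for_condition retry helper, which
polls a boolean expression and runs the else-script on timeout. A minimal
sketch of the pattern, assuming the real helper (which lives in
tests/support/util.tcl and also propagates errors raised by the condition)
behaves roughly like this:

    proc wait_for_condition {maxtries delay e _else_ elsescript} {
        while {[incr maxtries -1] >= 0} {
            # Evaluate the condition expression in the caller's scope
            set errcode [catch {uplevel 1 [list expr $e]} result]
            if {$errcode == 0 && $result} break
            after $delay
        }
        if {$maxtries == -1} {
            # The condition never became true: run the failure script
            uplevel 1 $elsescript
        }
    }

So "wait_for_condition 50 100 {...}" above retries up to 50 times with a
100 ms pause between attempts.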
diff --git a/tests/unit/cluster/cli.tcl b/tests/unit/cluster/cli.tcl
new file mode 100644
index 0000000..76e9721
--- /dev/null
+++ b/tests/unit/cluster/cli.tcl
@@ -0,0 +1,416 @@
+# Primitive tests on cluster-enabled redis using redis-cli
+
+source tests/support/cli.tcl
+
+# make sure the test infra won't use SELECT
+set old_singledb $::singledb
+set ::singledb 1
+
+# cluster creation is complicated with TLS, and the current tests don't really need that coverage
+tags {tls:skip external:skip cluster} {
+
+# start three servers
+set base_conf [list cluster-enabled yes cluster-node-timeout 1000]
+start_multiple_servers 3 [list overrides $base_conf] {
+
+ set node1 [srv 0 client]
+ set node2 [srv -1 client]
+ set node3 [srv -2 client]
+ set node3_pid [srv -2 pid]
+ set node3_rd [redis_deferring_client -2]
+
+ test {Create 3 node cluster} {
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+ }
+
+ test "Run blocking command on cluster node3" {
+ # key9184688 is mapped to slot 10923 (first slot of node 3)
+ $node3_rd brpop key9184688 0
+ $node3_rd flush
+
+ wait_for_condition 50 100 {
+ [s -2 blocked_clients] eq {1}
+ } else {
+ fail "Client not blocked"
+ }
+ }
+
+ test "Perform a Resharding" {
+ exec src/redis-cli --cluster-yes --cluster reshard 127.0.0.1:[srv -2 port] \
+ --cluster-to [$node1 cluster myid] \
+ --cluster-from [$node3 cluster myid] \
+ --cluster-slots 1
+ }
+
+ test "Verify command got unblocked after resharding" {
+ # this read will wait for node3 to realize the new topology
+ assert_error {*MOVED*} {$node3_rd read}
+
+ # verify there are no blocked clients
+ assert_equal [s 0 blocked_clients] {0}
+ assert_equal [s -1 blocked_clients] {0}
+ assert_equal [s -2 blocked_clients] {0}
+ }
+
+ test "Wait for cluster to be stable" {
+ # Cluster check just verifies that the config state is self-consistent.
+ # Waiting for cluster_state to be ok is an independent check that all the
+ # nodes actually believe each other to be healthy, preventing CLUSTERDOWN errors.
+ wait_for_condition 1000 50 {
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv 0 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -1 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -2 port]}] == 0 &&
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+ }
+
+ set node1_rd [redis_deferring_client 0]
+
+ test "use previous hostip in \"cluster-preferred-endpoint-type unknown-endpoint\" mode" {
+
+ # backup and set cluster-preferred-endpoint-type unknown-endpoint
+ set endpoint_type_before_set [lindex [split [$node1 CONFIG GET cluster-preferred-endpoint-type] " "] 1]
+ $node1 CONFIG SET cluster-preferred-endpoint-type unknown-endpoint
+
+ # when redis-cli is not in cluster mode, the server returns MOVED with an empty host
+ set slot_for_foo [$node1 CLUSTER KEYSLOT foo]
+ assert_error "*MOVED $slot_for_foo :*" {$node1 set foo bar}
+
+ # when in cluster mode, redirect using the previously seen host IP
+ assert_equal "[exec src/redis-cli -h 127.0.0.1 -p [srv 0 port] -c set foo bar]" {OK}
+ assert_match "[exec src/redis-cli -h 127.0.0.1 -p [srv 0 port] -c get foo]" {bar}
+
+ assert_equal [$node1 CONFIG SET cluster-preferred-endpoint-type "$endpoint_type_before_set"] {OK}
+ }
+
+ test "Sanity test push cmd after resharding" {
+ assert_error {*MOVED*} {$node3 lpush key9184688 v1}
+
+ $node1_rd brpop key9184688 0
+ $node1_rd flush
+
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ puts "Client not blocked"
+ puts "read from blocked client: [$node1_rd read]"
+ fail "Client not blocked"
+ }
+
+ $node1 lpush key9184688 v2
+ assert_equal {key9184688 v2} [$node1_rd read]
+ }
+
+ $node3_rd close
+
+ test "Run blocking command again on cluster node1" {
+ $node1 del key9184688
+ # key9184688 is mapped to slot 10923 which has been moved to node1
+ $node1_rd brpop key9184688 0
+ $node1_rd flush
+
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Client not blocked"
+ }
+ }
+
+ test "Kill a cluster node and wait for fail state" {
+ # kill node3 in cluster
+ pause_process $node3_pid
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {fail} &&
+ [CI 1 cluster_state] eq {fail}
+ } else {
+ fail "Cluster doesn't fail"
+ }
+ }
+
+ test "Verify command got unblocked after cluster failure" {
+ assert_error {*CLUSTERDOWN*} {$node1_rd read}
+
+ # verify there are no blocked clients
+ assert_equal [s 0 blocked_clients] {0}
+ assert_equal [s -1 blocked_clients] {0}
+ }
+
+ resume_process $node3_pid
+ $node1_rd close
+
+} ;# stop servers
+
+# Test redis-cli --cluster create, add-node, call.
+# Test that functions are propagated on add-node
+start_multiple_servers 5 [list overrides $base_conf] {
+
+ set node4_rd [redis_client -3]
+ set node5_rd [redis_client -4]
+
+ test {Functions are added to new node on redis-cli cluster add-node} {
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # upload a function to the entire cluster
+ exec src/redis-cli --cluster-yes --cluster call 127.0.0.1:[srv 0 port] \
+ FUNCTION LOAD {#!lua name=TEST
+ redis.register_function('test', function() return 'hello' end)
+ }
+
+ # adding node to the cluster
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_cluster_size 4
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # make sure the 'test' function was added to the new node
+ assert_equal {{library_name TEST engine LUA functions {{name test description {} flags {}}}}} [$node4_rd FUNCTION LIST]
+
+ # add function to node 5
+ assert_equal {TEST} [$node5_rd FUNCTION LOAD {#!lua name=TEST
+ redis.register_function('test', function() return 'hello' end)
+ }]
+
+ # make sure the function was added to node 5
+ assert_equal {{library_name TEST engine LUA functions {{name test description {} flags {}}}}} [$node5_rd FUNCTION LIST]
+
+ # adding node 5 to the cluster should fail because it already contains the 'test' function
+ catch {
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -4 port] \
+ 127.0.0.1:[srv 0 port]
+ } e
+ assert_match {*node already contains functions*} $e
+ }
+} ;# stop servers
+
+# Test redis-cli --cluster create, add-node.
+# Test that one slot can be migrated to and then away from the new node.
+test {Migrate the last slot away from a node using redis-cli} {
+ start_multiple_servers 4 [list overrides $base_conf] {
+
+ # Create a cluster of 3 nodes
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Insert some data
+ assert_equal OK [exec src/redis-cli -c -p [srv 0 port] SET foo bar]
+ set slot [exec src/redis-cli -c -p [srv 0 port] CLUSTER KEYSLOT foo]
+
+ # Add new node to the cluster
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+ # First we wait for new node to be recognized by entire cluster
+ wait_for_cluster_size 4
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ set newnode_r [redis_client -3]
+ set newnode_id [$newnode_r CLUSTER MYID]
+
+ # Find out which node has the key "foo" by asking the new node for a
+ # redirect.
+ catch { $newnode_r get foo } e
+ assert_match "MOVED $slot *" $e
+ lassign [split [lindex $e 2] :] owner_host owner_port
+ set owner_r [redis $owner_host $owner_port 0 $::tls]
+ set owner_id [$owner_r CLUSTER MYID]
+
+ # Move slot to new node using plain Redis commands
+ assert_equal OK [$newnode_r CLUSTER SETSLOT $slot IMPORTING $owner_id]
+ assert_equal OK [$owner_r CLUSTER SETSLOT $slot MIGRATING $newnode_id]
+ assert_equal {foo} [$owner_r CLUSTER GETKEYSINSLOT $slot 10]
+ assert_equal OK [$owner_r MIGRATE 127.0.0.1 [srv -3 port] "" 0 5000 KEYS foo]
+ assert_equal OK [$newnode_r CLUSTER SETSLOT $slot NODE $newnode_id]
+ assert_equal OK [$owner_r CLUSTER SETSLOT $slot NODE $newnode_id]
+
+ # Using --cluster check, make sure we won't get `Not all slots are covered by nodes`.
+ # Waiting for the cluster to become stable also makes sure the cluster is up during MIGRATE.
+ wait_for_condition 1000 50 {
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv 0 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -1 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -2 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -3 port]}] == 0 &&
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Move the only slot back to original node using redis-cli
+ exec src/redis-cli --cluster reshard 127.0.0.1:[srv -3 port] \
+ --cluster-from $newnode_id \
+ --cluster-to $owner_id \
+ --cluster-slots 1 \
+ --cluster-yes
+
+ # The empty node will become a replica of the new owner before the
+ # `MOVED` check, so let's wait for the cluster to become stable.
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Check that the key foo has been migrated back to the original owner.
+ catch { $newnode_r get foo } e
+ assert_equal "MOVED $slot $owner_host:$owner_port" $e
+
+ # Check that the empty node has turned itself into a replica of the new
+ # owner and that the new owner knows that.
+ wait_for_condition 1000 50 {
+ [string match "*slave*" [$owner_r CLUSTER REPLICAS $owner_id]]
+ } else {
+ fail "Empty node didn't turn itself into a replica."
+ }
+ }
+}
+
+# Test redis-cli --cluster create, add-node with cluster-port.
+# Create five nodes, three with custom cluster_port and two with default values.
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1 cluster-port [find_available_port $::baseport $::portcount]]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1 cluster-port [find_available_port $::baseport $::portcount]]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1 cluster-port [find_available_port $::baseport $::portcount]]] {
+
+ # The first three are used to test --cluster create.
+ # The last two are used to test --cluster add-node
+ set node1_rd [redis_client 0]
+ set node2_rd [redis_client -1]
+ set node3_rd [redis_client -2]
+ set node4_rd [redis_client -3]
+ set node5_rd [redis_client -4]
+
+ test {redis-cli --cluster create with cluster-port} {
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Make sure each node can meet other nodes
+ assert_equal 3 [CI 0 cluster_known_nodes]
+ assert_equal 3 [CI 1 cluster_known_nodes]
+ assert_equal 3 [CI 2 cluster_known_nodes]
+ }
+
+ test {redis-cli --cluster add-node with cluster-port} {
+ # Adding node to the cluster (without cluster-port)
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_cluster_size 4
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Adding node to the cluster (with cluster-port)
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -4 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_cluster_size 5
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok} &&
+ [CI 4 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Make sure each node can meet other nodes
+ assert_equal 5 [CI 0 cluster_known_nodes]
+ assert_equal 5 [CI 1 cluster_known_nodes]
+ assert_equal 5 [CI 2 cluster_known_nodes]
+ assert_equal 5 [CI 3 cluster_known_nodes]
+ assert_equal 5 [CI 4 cluster_known_nodes]
+ }
+# stop 5 servers
+}
+}
+}
+}
+}
+
+} ;# tags
+
+set ::singledb $old_singledb
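
A note on magic keys such as key9184688 above: a key's slot is CRC16(key) mod
16384, and with the even 3-master layout produced by --cluster create the
third node owns slots 10923-16383, so the tests need a key that hashes to
slot 10923. Such keys are typically found by brute force; a hypothetical
helper (not part of the suite) could find one by asking the server:

    # Hypothetical: find a key of the form key<N> mapping to a target slot.
    proc find_key_for_slot {client target_slot} {
        for {set i 0} {$i < 10000000} {incr i} {
            if {[$client cluster keyslot key$i] == $target_slot} {
                return key$i
            }
        }
        error "no key found for slot $target_slot"
    }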
diff --git a/tests/unit/cluster/cluster-response-tls.tcl b/tests/unit/cluster/cluster-response-tls.tcl
new file mode 100644
index 0000000..a099fa7
--- /dev/null
+++ b/tests/unit/cluster/cluster-response-tls.tcl
@@ -0,0 +1,110 @@
+source tests/support/cluster.tcl
+
+proc get_port_from_moved_error {e} {
+ set ip_port [lindex [split $e " "] 2]
+ return [lindex [split $ip_port ":"] 1]
+}
+
+proc get_pport_by_port {port} {
+ foreach srv $::servers {
+ set srv_port [dict get $srv port]
+ if {$port == $srv_port} {
+ return [dict get $srv pport]
+ }
+ }
+ return 0
+}
+
+proc get_port_from_node_info {line} {
+ set fields [split $line " "]
+ set addr [lindex $fields 1]
+ set ip_port [lindex [split $addr "@"] 0]
+ return [lindex [split $ip_port ":"] 1]
+}
+
+proc cluster_response_tls {tls_cluster} {
+
+ test "CLUSTER SLOTS with different connection type -- tls-cluster $tls_cluster" {
+ set slots1 [R 0 cluster slots]
+ set pport [srv 0 pport]
+ set cluster_client [redis_cluster 127.0.0.1:$pport 0]
+ set slots2 [$cluster_client cluster slots]
+ $cluster_client close
+ # Compare the ports in the first row
+ assert_no_match [lindex $slots1 0 2 1] [lindex $slots2 0 2 1]
+ }
+
+ test "CLUSTER NODES return port according to connection type -- tls-cluster $tls_cluster" {
+ set nodes [R 0 cluster nodes]
+ set port1 [get_port_from_node_info [lindex [split $nodes "\r\n"] 0]]
+ set pport [srv 0 pport]
+ set cluster_client [redis_cluster 127.0.0.1:$pport 0]
+ set nodes [$cluster_client cluster nodes]
+ set port2 [get_port_from_node_info [lindex [split $nodes "\r\n"] 0]]
+ $cluster_client close
+ assert_not_equal $port1 $port2
+ }
+
+ set cluster [redis_cluster 127.0.0.1:[srv 0 port]]
+ set cluster_pport [redis_cluster 127.0.0.1:[srv 0 pport] 0]
+ $cluster refresh_nodes_map
+
+ test "Set many keys in the cluster -- tls-cluster $tls_cluster" {
+ for {set i 0} {$i < 5000} {incr i} {
+ $cluster set $i $i
+ assert { [$cluster get $i] eq $i }
+ }
+ }
+
+ test "Test cluster responses during migration of slot x -- tls-cluster $tls_cluster" {
+ set slot 10
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+ $nodeto(link) cluster setslot $slot importing $nodefrom(id)
+ $nodefrom(link) cluster setslot $slot migrating $nodeto(id)
+
+ # Get a key from that slot
+ set key [$nodefrom(link) cluster GETKEYSINSLOT $slot "1"]
+ # MOVED REPLY
+ catch {$nodeto(link) set $key "newVal"} e_moved1
+ assert_match "*MOVED*" $e_moved1
+ # ASK REPLY
+ catch {$nodefrom(link) set "abc{$key}" "newVal"} e_ask1
+ assert_match "*ASK*" $e_ask1
+
+ # UNSTABLE REPLY
+ assert_error "*TRYAGAIN*" {$nodefrom(link) mset "a{$key}" "newVal" $key "newVal2"}
+
+ # Connecting using another protocol
+ array set nodefrom_pport [$cluster_pport masternode_for_slot $slot]
+ array set nodeto_pport [$cluster_pport masternode_notfor_slot $slot]
+
+ # MOVED REPLY
+ catch {$nodeto_pport(link) set $key "newVal"} e_moved2
+ assert_match "*MOVED*" $e_moved2
+ # ASK REPLY
+ catch {$nodefrom_pport(link) set "abc{$key}" "newVal"} e_ask2
+ assert_match "*ASK*" $e_ask2
+ # Compare MOVED error's port
+ set port1 [get_port_from_moved_error $e_moved1]
+ set port2 [get_port_from_moved_error $e_moved2]
+ assert_not_equal $port1 $port2
+ assert_equal $port1 $nodefrom(port)
+ assert_equal $port2 [get_pport_by_port $nodefrom(port)]
+ # Compare ASK error's port
+ set port1 [get_port_from_moved_error $e_ask1]
+ set port2 [get_port_from_moved_error $e_ask2]
+ assert_not_equal $port1 $port2
+ assert_equal $port1 $nodeto(port)
+ assert_equal $port2 [get_pport_by_port $nodeto(port)]
+ }
+}
+
+if {$::tls} {
+ start_cluster 3 3 {tags {external:skip cluster tls} overrides {tls-cluster yes tls-replication yes}} {
+ cluster_response_tls yes
+ }
+ start_cluster 3 3 {tags {external:skip cluster tls} overrides {tls-cluster no tls-replication no}} {
+ cluster_response_tls no
+ }
+}
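
For reference, get_port_from_node_info above parses the address field of a
CLUSTER NODES line, which has the form ip:port@cport. A usage sketch with a
made-up node id:

    set line {e7d1eec 127.0.0.1:30001@40001 myself,master - 0 0 1 connected 0-5460}
    puts [get_port_from_node_info $line]   ;# -> 30001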
diff --git a/tests/unit/cluster/hostnames.tcl b/tests/unit/cluster/hostnames.tcl
new file mode 100644
index 0000000..f318240
--- /dev/null
+++ b/tests/unit/cluster/hostnames.tcl
@@ -0,0 +1,203 @@
+proc get_slot_field {slot_output shard_id node_id attrib_id} {
+ return [lindex [lindex [lindex $slot_output $shard_id] $node_id] $attrib_id]
+}
+
+# Start a cluster with 3 masters and 4 replicas.
+# These tests rely on specific node ordering, so make sure no node fails over.
+start_cluster 3 4 {tags {external:skip cluster} overrides {cluster-replica-no-failover yes}} {
+test "Set cluster hostnames and verify they are propagated" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname "host-$j.com"
+ }
+
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated "host-*.com"] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+
+ # Now that everything is propagated, assert everyone agrees
+ wait_for_cluster_propagation
+}
+
+test "Update hostnames and make sure they are all eventually propagated" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname "host-updated-$j.com"
+ }
+
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated "host-updated-*.com"] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+
+ # Now that everything is propagated, assert everyone agrees
+ wait_for_cluster_propagation
+}
+
+test "Remove hostnames and make sure they are all eventually propagated" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname ""
+ }
+
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated ""] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+
+ # Now that everything is propagated, assert everyone agrees
+ wait_for_cluster_propagation
+}
+
+test "Verify cluster-preferred-endpoint-type behavior for redirects and info" {
+ R 0 config set cluster-announce-hostname "me.com"
+ R 1 config set cluster-announce-hostname ""
+ R 2 config set cluster-announce-hostname "them.com"
+
+ wait_for_cluster_propagation
+
+ # Verify default behavior
+ set slot_result [R 0 cluster slots]
+ assert_equal "" [lindex [get_slot_field $slot_result 0 2 0] 1]
+ assert_equal "" [lindex [get_slot_field $slot_result 2 2 0] 1]
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 0 2 3] 0]
+ assert_equal "me.com" [lindex [get_slot_field $slot_result 0 2 3] 1]
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 2 2 3] 0]
+ assert_equal "them.com" [lindex [get_slot_field $slot_result 2 2 3] 1]
+
+ # Redirect will use the IP address
+ catch {R 0 set foo foo} redir_err
+ assert_match "MOVED * 127.0.0.1:*" $redir_err
+
+ # Verify prefer hostname behavior
+ R 0 config set cluster-preferred-endpoint-type hostname
+
+ set slot_result [R 0 cluster slots]
+ assert_equal "me.com" [get_slot_field $slot_result 0 2 0]
+ assert_equal "them.com" [get_slot_field $slot_result 2 2 0]
+
+ # Redirect should use hostname
+ catch {R 0 set foo foo} redir_err
+ assert_match "MOVED * them.com:*" $redir_err
+
+ # Redirect to an unknown hostname returns ?
+ catch {R 0 set barfoo bar} redir_err
+ assert_match "MOVED * ?:*" $redir_err
+
+ # Verify unknown hostname behavior
+ R 0 config set cluster-preferred-endpoint-type unknown-endpoint
+
+ # Verify default behavior
+ set slot_result [R 0 cluster slots]
+ assert_equal "ip" [lindex [get_slot_field $slot_result 0 2 3] 0]
+ assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 0 2 3] 1]
+ assert_equal "ip" [lindex [get_slot_field $slot_result 2 2 3] 0]
+ assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 2 2 3] 1]
+ assert_equal "ip" [lindex [get_slot_field $slot_result 1 2 3] 0]
+ assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 1 2 3] 1]
+ # Not required by the protocol, but IP comes before hostname
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 0 2 3] 2]
+ assert_equal "me.com" [lindex [get_slot_field $slot_result 0 2 3] 3]
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 2 2 3] 2]
+ assert_equal "them.com" [lindex [get_slot_field $slot_result 2 2 3] 3]
+
+ # This node doesn't have a hostname
+ assert_equal 2 [llength [get_slot_field $slot_result 1 2 3]]
+
+ # Redirect should use empty string
+ catch {R 0 set foo foo} redir_err
+ assert_match "MOVED * :*" $redir_err
+
+ R 0 config set cluster-preferred-endpoint-type ip
+}
+
+test "Verify the nodes configured with prefer hostname only show hostname for new nodes" {
+ # Have everyone forget node 6 and isolate it from the cluster.
+ isolate_node 6
+
+ # Set hostnames for the masters, now that the node is isolated
+ R 0 config set cluster-announce-hostname "shard-1.com"
+ R 1 config set cluster-announce-hostname "shard-2.com"
+ R 2 config set cluster-announce-hostname "shard-3.com"
+
+ # Prevent Node 0 and Node 6 from properly meeting;
+ # they'll hang in the handshake phase. This allows us to
+ # test the case where we "know" about a node but haven't
+ # successfully retrieved information about it yet.
+ R 0 DEBUG DROP-CLUSTER-PACKET-FILTER 0
+ R 6 DEBUG DROP-CLUSTER-PACKET-FILTER 0
+
+ # Have a replica meet the isolated node
+ R 3 cluster meet 127.0.0.1 [srv -6 port]
+
+ # Wait for the isolated node to learn about the rest of the cluster;
+ # each node corresponds to a single entry in CLUSTER NODES. Note this
+ # doesn't mean the isolated node has successfully contacted each
+ # node.
+ wait_for_condition 50 100 {
+ [llength [split [R 6 CLUSTER NODES] "\n"]] eq [expr [llength $::servers] + 1]
+ } else {
+ fail "Isolated node didn't learn about the rest of the cluster *"
+ }
+
+ # Now we wait until the two nodes that aren't filtering packets
+ # accept the isolated node's connections. At this point the nodes
+ # will start showing up in cluster slots.
+ wait_for_condition 50 100 {
+ [llength [R 6 CLUSTER SLOTS]] eq 2
+ } else {
+ fail "Node did not learn about the 2 shards it can talk to"
+ }
+ set slot_result [R 6 CLUSTER SLOTS]
+ assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-2.com"
+ assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-3.com"
+
+ # Also make sure we know about the isolated master; we
+ # just can't reach it.
+ set master_id [R 0 CLUSTER MYID]
+ assert_match "*$master_id*" [R 6 CLUSTER NODES]
+
+ # Stop dropping cluster packets, and make sure everything
+ # stabilizes
+ R 0 DEBUG DROP-CLUSTER-PACKET-FILTER -1
+ R 6 DEBUG DROP-CLUSTER-PACKET-FILTER -1
+
+ # This operation sometimes spikes to around 5 seconds to resolve the state,
+ # so it has a higher timeout.
+ wait_for_condition 50 500 {
+ [llength [R 6 CLUSTER SLOTS]] eq 3
+ } else {
+ fail "Node did not learn about the 2 shards it can talk to"
+ }
+ set slot_result [R 6 CLUSTER SLOTS]
+ assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-1.com"
+ assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-2.com"
+ assert_equal [lindex [get_slot_field $slot_result 2 2 3] 1] "shard-3.com"
+}
+
+test "Test restart will keep hostname information" {
+ # Set a new hostname, reboot and make sure it sticks
+ R 0 config set cluster-announce-hostname "restart-1.com"
+
+ # Store the hostname in the config
+ R 0 config rewrite
+
+ restart_server 0 true false
+ set slot_result [R 0 CLUSTER SLOTS]
+ assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "restart-1.com"
+
+ # As a sanity check, make sure everyone eventually agrees
+ wait_for_cluster_propagation
+}
+
+test "Test hostname validation" {
+ catch {R 0 config set cluster-announce-hostname [string repeat x 256]} err
+ assert_match "*Hostnames must be less than 256 characters*" $err
+ catch {R 0 config set cluster-announce-hostname "?.com"} err
+ assert_match "*Hostnames may only contain alphanumeric characters, hyphens or dots*" $err
+
+ # Note this isn't a valid hostname, but it passes our internal validation
+ R 0 config set cluster-announce-hostname "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-."
+}
+}
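
The get_slot_field indices used throughout this file are easier to follow
against the shape of a CLUSTER SLOTS reply: each entry is
{start end master replica...}, so node index 2 is the master, and its
attributes are {endpoint port id metadata}. A sketch with made-up values:

    set slots {{0 5460 {127.0.0.1 30001 abc123 {hostname me.com}}}}
    puts [get_slot_field $slots 0 2 0]   ;# -> 127.0.0.1 (endpoint)
    puts [get_slot_field $slots 0 2 3]   ;# -> hostname me.com (metadata)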
diff --git a/tests/unit/cluster/human-announced-nodename.tcl b/tests/unit/cluster/human-announced-nodename.tcl
new file mode 100644
index 0000000..a595ca6
--- /dev/null
+++ b/tests/unit/cluster/human-announced-nodename.tcl
@@ -0,0 +1,29 @@
+# Check that the cluster's view of the human announced nodename is reported in logs
+start_cluster 3 0 {tags {external:skip cluster}} {
+ test "Set cluster human announced nodename and let it propagate" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname "host-$j.com"
+ R $j config set cluster-announce-human-nodename "nodename-$j"
+ }
+
+ # We wait for everyone to agree on the hostnames. Since they are gossiped
+ # the same way as nodenames, it implies everyone knows the nodenames too.
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated "host-*.com"] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+ }
+
+ test "Human nodenames are visible in log messages" {
+ # Pause instance 0, so everyone thinks it is dead
+ pause_process [srv 0 pid]
+
+ # We use a log message we know will be emitted (node unreachable),
+ # since it includes the nodename of the node that gossiped the failure.
+ wait_for_log_messages -1 {"*Node * (nodename-2) reported node * (nodename-0) as not reachable*"} 0 20 500
+ wait_for_log_messages -2 {"*Node * (nodename-1) reported node * (nodename-0) as not reachable*"} 0 20 500
+
+ resume_process [srv 0 pid]
+ }
+}
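
wait_for_log_messages above polls a server's logfile for glob patterns
starting at a given line. A rough sketch of the loop it presumably
implements (the real helper lives in tests/support/util.tcl; the "stdout"
field name and the exact failure handling here are assumptions):

    # Sketch (assumptions): poll the logfile until every pattern matches.
    proc wait_for_log_messages {srv_idx patterns from_line maxtries delay} {
        set logfile [srv $srv_idx stdout]
        while {[incr maxtries -1] >= 0} {
            set lines [lrange [split [exec cat $logfile] "\n"] $from_line end]
            set ok 1
            foreach p $patterns {
                if {[lsearch -glob $lines $p] == -1} { set ok 0 }
            }
            if {$ok} return
            after $delay
        }
        fail "expected log messages not found"
    }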
diff --git a/tests/unit/cluster/links.tcl b/tests/unit/cluster/links.tcl
new file mode 100644
index 0000000..a202c37
--- /dev/null
+++ b/tests/unit/cluster/links.tcl
@@ -0,0 +1,292 @@
+proc get_links_with_peer {this_instance_id peer_nodename} {
+ set links [R $this_instance_id cluster links]
+ set links_with_peer {}
+ foreach l $links {
+ if {[dict get $l node] eq $peer_nodename} {
+ lappend links_with_peer $l
+ }
+ }
+ return $links_with_peer
+}
+
+# Return the entry in CLUSTER LINKS output by instance identified by `this_instance_id` that
+# corresponds to the link established toward a peer identified by `peer_nodename`
+proc get_link_to_peer {this_instance_id peer_nodename} {
+ set links_with_peer [get_links_with_peer $this_instance_id $peer_nodename]
+ foreach l $links_with_peer {
+ if {[dict get $l direction] eq "to"} {
+ return $l
+ }
+ }
+ return {}
+}
+
+# Return the entry in CLUSTER LINKS output by instance identified by `this_instance_id` that
+# corresponds to the link accepted from a peer identified by `peer_nodename`
+proc get_link_from_peer {this_instance_id peer_nodename} {
+ set links_with_peer [get_links_with_peer $this_instance_id $peer_nodename]
+ foreach l $links_with_peer {
+ if {[dict get $l direction] eq "from"} {
+ return $l
+ }
+ }
+ return {}
+}
+
+# Reset cluster links to their original state
+proc reset_links {id} {
+ set limit [lindex [R $id CONFIG get cluster-link-sendbuf-limit] 1]
+
+ # Set a 1 byte limit and wait for cluster cron to run
+ # (executes every 100ms) and terminate links
+ R $id CONFIG SET cluster-link-sendbuf-limit 1
+ after 150
+
+ # Reset limit
+ R $id CONFIG SET cluster-link-sendbuf-limit $limit
+
+ # Wait until the cluster links come back up for each node
+ wait_for_condition 50 100 {
+ [number_of_links $id] == [expr [number_of_peers $id] * 2]
+ } else {
+ fail "Cluster links did not come back up"
+ }
+}
+
+proc number_of_peers {id} {
+ expr [llength $::servers] - 1
+}
+
+proc number_of_links {id} {
+ llength [R $id cluster links]
+}
+
+proc publish_messages {server num_msgs msg_size} {
+ for {set i 0} {$i < $num_msgs} {incr i} {
+ $server PUBLISH channel [string repeat "x" $msg_size]
+ }
+}
+
+start_cluster 1 2 {tags {external:skip cluster}} {
+ set primary_id 0
+ set replica1_id 1
+
+ set primary [Rn $primary_id]
+ set replica1 [Rn $replica1_id]
+
+ test "Broadcast message across a cluster shard while a cluster link is down" {
+ set replica1_node_id [$replica1 CLUSTER MYID]
+
+ set channelname ch3
+
+ # subscribe on replica1
+ set subscribeclient1 [redis_deferring_client -1]
+ $subscribeclient1 deferred 1
+ $subscribeclient1 SSUBSCRIBE $channelname
+ $subscribeclient1 read
+
+ # subscribe on replica2
+ set subscribeclient2 [redis_deferring_client -2]
+ $subscribeclient2 deferred 1
+ $subscribeclient2 SSUBSCRIBE $channelname
+ $subscribeclient2 read
+
+ # Verify the number of links in the stable cluster state
+ assert_equal [expr [number_of_peers $primary_id]*2] [number_of_links $primary_id]
+
+ # Disconnect the cluster between primary and replica1 and publish a message.
+ $primary MULTI
+ $primary DEBUG CLUSTERLINK KILL TO $replica1_node_id
+ $primary SPUBLISH $channelname hello
+ set res [$primary EXEC]
+
+ # Verify no client exists on the primary to receive the published message.
+ assert_equal $res {OK 0}
+
+ # Wait until all the cluster links are healthy
+ wait_for_condition 50 100 {
+ [number_of_peers $primary_id]*2 == [number_of_links $primary_id]
+ } else {
+ fail "All peer links couldn't be established"
+ }
+
+ # Publish a message afterwards.
+ $primary SPUBLISH $channelname world
+
+ # Verify replica1 has received only "world"; "hello" was lost.
+ assert_equal "smessage ch3 world" [$subscribeclient1 read]
+
+ # Verify replica2 has received both messages (hello/world)
+ assert_equal "smessage ch3 hello" [$subscribeclient2 read]
+ assert_equal "smessage ch3 world" [$subscribeclient2 read]
+ } {} {needs:debug}
+}
+
+start_cluster 3 0 {tags {external:skip cluster}} {
+ test "Each node has two links with each peer" {
+ for {set id 0} {$id < [llength $::servers]} {incr id} {
+ # Assert that from point of view of each node, there are two links for
+ # each peer. It might take a while for cluster to stabilize so wait up
+ # to 5 seconds.
+ wait_for_condition 50 100 {
+ [number_of_peers $id]*2 == [number_of_links $id]
+ } else {
+ assert_equal [expr [number_of_peers $id]*2] [number_of_links $id]
+ }
+
+ set nodes [get_cluster_nodes $id]
+ set links [R $id cluster links]
+
+ # For each peer there should be exactly one
+ # link "to" it and one link "from" it.
+ foreach n $nodes {
+ if {[cluster_has_flag $n myself]} continue
+ set peer [dict get $n id]
+ set to 0
+ set from 0
+ foreach l $links {
+ if {[dict get $l node] eq $peer} {
+ if {[dict get $l direction] eq "to"} {
+ incr to
+ } elseif {[dict get $l direction] eq "from"} {
+ incr from
+ }
+ }
+ }
+ assert {$to eq 1}
+ assert {$from eq 1}
+ }
+ }
+ }
+
+ test {Validate cluster links format} {
+ set lines [R 0 cluster links]
+ foreach l $lines {
+ if {$l eq {}} continue
+ assert_equal [llength $l] 12
+ assert_equal 1 [dict exists $l "direction"]
+ assert_equal 1 [dict exists $l "node"]
+ assert_equal 1 [dict exists $l "create-time"]
+ assert_equal 1 [dict exists $l "events"]
+ assert_equal 1 [dict exists $l "send-buffer-allocated"]
+ assert_equal 1 [dict exists $l "send-buffer-used"]
+ }
+ }
+
+ set primary1_id 0
+ set primary2_id 1
+
+ set primary1 [Rn $primary1_id]
+ set primary2 [Rn $primary2_id]
+
+ test "Disconnect link when send buffer limit reached" {
+ # On primary1, set timeout to 1 hour so links won't get disconnected due to timeouts
+ set oldtimeout [lindex [$primary1 CONFIG get cluster-node-timeout] 1]
+ $primary1 CONFIG set cluster-node-timeout [expr 60*60*1000]
+
+ # Get primary1's links with primary2
+ set primary2_name [dict get [cluster_get_myself $primary2_id] id]
+ set orig_link_p1_to_p2 [get_link_to_peer $primary1_id $primary2_name]
+ set orig_link_p1_from_p2 [get_link_from_peer $primary1_id $primary2_name]
+
+ # On primary1, set cluster link send buffer limit to 256KB, which is large enough to not be
+ # overflowed by regular gossip messages but also small enough that it doesn't take too much
+ # memory to overflow it. If it is set too high, Redis may get OOM killed by kernel before this
+ # limit is overflowed in some RAM-limited test environments.
+ set oldlimit [lindex [$primary1 CONFIG get cluster-link-sendbuf-limit] 1]
+ $primary1 CONFIG set cluster-link-sendbuf-limit [expr 256*1024]
+ assert {[CI $primary1_id total_cluster_links_buffer_limit_exceeded] eq 0}
+
+ # To manufacture an ever-growing send buffer from primary1 to primary2,
+ # make primary2 unresponsive.
+ set primary2_pid [srv [expr -1*$primary2_id] pid]
+ pause_process $primary2_pid
+
+ # On primary1, send 128KB Pubsub messages in a loop until the send buffer of the link from
+ # primary1 to primary2 exceeds the buffer limit and is therefore dropped.
+ # For the send buffer to grow, we first need to exhaust the TCP send buffer of primary1
+ # and the TCP receive buffer of primary2. The sizes of these two buffers vary by OS, but
+ # 100 128KB messages should be sufficient.
+ set i 0
+ wait_for_condition 100 0 {
+ [catch {incr i} e] == 0 &&
+ [catch {$primary1 publish channel [prepare_value [expr 128*1024]]} e] == 0 &&
+ [catch {after 500} e] == 0 &&
+ [CI $primary1_id total_cluster_links_buffer_limit_exceeded] >= 1
+ } else {
+ fail "Cluster link not freed as expected"
+ }
+
+ # A new link to primary2 should have been recreated
+ set new_link_p1_to_p2 [get_link_to_peer $primary1_id $primary2_name]
+ assert {[dict get $new_link_p1_to_p2 create-time] > [dict get $orig_link_p1_to_p2 create-time]}
+
+ # Link from primary2 should not be affected
+ set same_link_p1_from_p2 [get_link_from_peer $primary1_id $primary2_name]
+ assert {[dict get $same_link_p1_from_p2 create-time] eq [dict get $orig_link_p1_from_p2 create-time]}
+
+ # Revive primary2
+ resume_process $primary2_pid
+
+ # Reset configs on primary1 so config changes don't leak out to other tests
+ $primary1 CONFIG set cluster-node-timeout $oldtimeout
+ $primary1 CONFIG set cluster-link-sendbuf-limit $oldlimit
+
+ reset_links $primary1_id
+ }
+
+ test "Link memory increases with publishes" {
+ set server_id 0
+ set server [Rn $server_id]
+ set msg_size 10000
+ set num_msgs 10
+
+ # Remove any sendbuf limit
+ $primary1 CONFIG set cluster-link-sendbuf-limit 0
+
+ # Publish ~100KB to one of the servers
+ $server MULTI
+ $server INFO memory
+ publish_messages $server $num_msgs $msg_size
+ $server INFO memory
+ set res [$server EXEC]
+
+ set link_mem_before_pubs [getInfoProperty $res mem_cluster_links]
+
+ # Remove the first half of the response string which contains the
+ # first "INFO memory" results and search for the property again
+ set res [string range $res [expr [string length $res] / 2] end]
+ set link_mem_after_pubs [getInfoProperty $res mem_cluster_links]
+
+ # We expect the memory to have increased by more than
+ # the cumulative size of the publish messages
+ set mem_diff_floor [expr $msg_size * $num_msgs]
+ set mem_diff [expr $link_mem_after_pubs - $link_mem_before_pubs]
+ assert {$mem_diff > $mem_diff_floor}
+
+ # Reset links to ensure no leftover data for the next test
+ reset_links $server_id
+ }
+
+ test "Link memory resets after publish messages flush" {
+ set server [Rn 0]
+ set msg_size 100000
+ set num_msgs 10
+
+ set link_mem_before [status $server mem_cluster_links]
+
+ # Publish ~1MB to one of the servers
+ $server MULTI
+ publish_messages $server $num_msgs $msg_size
+ $server EXEC
+
+ # Wait until the cluster link memory has returned to below the pre-publish value.
+ # We can't guarantee it returns to the exact same value since gossip messages
+ # can cause the values to fluctuate.
+ wait_for_condition 1000 500 {
+ [status $server mem_cluster_links] <= $link_mem_before
+ } else {
+ fail "Cluster link memory did not settle back to expected range"
+ }
+ }
+}
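
A usage sketch for the link helpers defined at the top of this file: every
peer pair maintains two TCP links, and CLUSTER LINKS reports the direction
of each, so the two can be fetched separately:

    set peer_id   [R 1 CLUSTER MYID]
    set to_link   [get_link_to_peer 0 $peer_id]    ;# link node 0 initiated
    set from_link [get_link_from_peer 0 $peer_id]  ;# link accepted from the peer
    puts [dict get $to_link create-time]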
diff --git a/tests/unit/cluster/misc.tcl b/tests/unit/cluster/misc.tcl
new file mode 100644
index 0000000..cd66697
--- /dev/null
+++ b/tests/unit/cluster/misc.tcl
@@ -0,0 +1,26 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+ test {Key lazy expires during key migration} {
+ R 0 DEBUG SET-ACTIVE-EXPIRE 0
+
+ set key_slot [R 0 CLUSTER KEYSLOT FOO]
+ R 0 set FOO BAR PX 10
+ set src_id [R 0 CLUSTER MYID]
+ set trg_id [R 1 CLUSTER MYID]
+ R 0 CLUSTER SETSLOT $key_slot MIGRATING $trg_id
+ R 1 CLUSTER SETSLOT $key_slot IMPORTING $src_id
+ after 11
+ assert_error {ASK*} {R 0 GET FOO}
+ R 0 ping
+ } {PONG}
+
+ test "Coverage: Basic cluster commands" {
+ assert_equal {OK} [R 0 CLUSTER saveconfig]
+
+ set id [R 0 CLUSTER MYID]
+ assert_equal {0} [R 0 CLUSTER count-failure-reports $id]
+
+ R 0 flushall
+ assert_equal {OK} [R 0 CLUSTER flushslots]
+ }
+}
+
diff --git a/tests/unit/cluster/multi-slot-operations.tcl b/tests/unit/cluster/multi-slot-operations.tcl
new file mode 100644
index 0000000..cc7bb7a
--- /dev/null
+++ b/tests/unit/cluster/multi-slot-operations.tcl
@@ -0,0 +1,109 @@
+# This test uses a custom slot allocation
+proc cluster_allocate_with_continuous_slots_local {n} {
+ R 0 cluster ADDSLOTSRANGE 0 3276
+ R 1 cluster ADDSLOTSRANGE 3277 6552
+ R 2 cluster ADDSLOTSRANGE 6553 9828
+ R 3 cluster ADDSLOTSRANGE 9829 13104
+ R 4 cluster ADDSLOTSRANGE 13105 16383
+}
+
+start_cluster 5 0 {tags {external:skip cluster}} {
+
+set master1 [srv 0 "client"]
+set master2 [srv -1 "client"]
+set master3 [srv -2 "client"]
+set master4 [srv -3 "client"]
+set master5 [srv -4 "client"]
+
+test "Continuous slots distribution" {
+ assert_match "* 0-3276*" [$master1 CLUSTER NODES]
+ assert_match "* 3277-6552*" [$master2 CLUSTER NODES]
+ assert_match "* 6553-9828*" [$master3 CLUSTER NODES]
+ assert_match "* 9829-13104*" [$master4 CLUSTER NODES]
+ assert_match "* 13105-16383*" [$master5 CLUSTER NODES]
+ assert_match "*0 3276*" [$master1 CLUSTER SLOTS]
+ assert_match "*3277 6552*" [$master2 CLUSTER SLOTS]
+ assert_match "*6553 9828*" [$master3 CLUSTER SLOTS]
+ assert_match "*9829 13104*" [$master4 CLUSTER SLOTS]
+ assert_match "*13105 16383*" [$master5 CLUSTER SLOTS]
+
+ $master1 CLUSTER DELSLOTSRANGE 3001 3050
+ assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES]
+ assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS]
+
+ $master2 CLUSTER DELSLOTSRANGE 5001 5500
+ assert_match "* 3277-5000 5501-6552*" [$master2 CLUSTER NODES]
+ assert_match "*3277 5000*5501 6552*" [$master2 CLUSTER SLOTS]
+
+ $master3 CLUSTER DELSLOTSRANGE 7001 7100 8001 8500
+ assert_match "* 6553-7000 7101-8000 8501-9828*" [$master3 CLUSTER NODES]
+ assert_match "*6553 7000*7101 8000*8501 9828*" [$master3 CLUSTER SLOTS]
+
+ $master4 CLUSTER DELSLOTSRANGE 11001 12000 12101 12200
+ assert_match "* 9829-11000 12001-12100 12201-13104*" [$master4 CLUSTER NODES]
+ assert_match "*9829 11000*12001 12100*12201 13104*" [$master4 CLUSTER SLOTS]
+
+ $master5 CLUSTER DELSLOTSRANGE 13501 14000 15001 16000
+ assert_match "* 13105-13500 14001-15000 16001-16383*" [$master5 CLUSTER NODES]
+ assert_match "*13105 13500*14001 15000*16001 16383*" [$master5 CLUSTER SLOTS]
+}
+
+test "ADDSLOTS command with several boundary conditions test suite" {
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTS 3001 aaa}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTS 3001 -1000}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTS 3001 30003}
+
+ assert_error "ERR Slot 3200 is already busy" {R 0 cluster ADDSLOTS 3200}
+ assert_error "ERR Slot 8501 is already busy" {R 0 cluster ADDSLOTS 8501}
+
+ assert_error "ERR Slot 3001 specified multiple times" {R 0 cluster ADDSLOTS 3001 3002 3001}
+}
+
+test "ADDSLOTSRANGE command with several boundary conditions test suite" {
+ # Add multiple slots with incorrect argument number
+ assert_error "ERR wrong number of arguments for 'cluster|addslotsrange' command" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030}
+
+ # Add multiple slots with invalid input slot
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 aaa}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 70000}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 -1000 3030}
+
+ # Add multiple slots when start slot number is greater than the end slot
+ assert_error "ERR start slot number 3030 is greater than end slot number 3025" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 3025}
+
+ # Add multiple slots with a busy slot
+ assert_error "ERR Slot 3200 is already busy" {R 0 cluster ADDSLOTSRANGE 3001 3020 3200 3250}
+
+ # Add multiple slots with a slot specified multiple times
+ assert_error "ERR Slot 3001 specified multiple times" {R 0 cluster ADDSLOTSRANGE 3001 3020 3001 3020}
+}
+
+test "DELSLOTSRANGE command with several boundary conditions test suite" {
+ # Delete multiple slots with incorrect argument number
+ assert_error "ERR wrong number of arguments for 'cluster|delslotsrange' command" {R 0 cluster DELSLOTSRANGE 1000 2000 2100}
+ assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES]
+ assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS]
+
+ # Delete multiple slots with invalid input slot
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 2100 aaa}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 2100 70000}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 -2100 2200}
+ assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES]
+ assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS]
+
+ # Delete multiple slots when start slot number is greater than the end slot
+ assert_error "ERR start slot number 5800 is greater than end slot number 5750" {R 1 cluster DELSLOTSRANGE 5600 5700 5800 5750}
+ assert_match "* 3277-5000 5501-6552*" [$master2 CLUSTER NODES]
+ assert_match "*3277 5000*5501 6552*" [$master2 CLUSTER SLOTS]
+
+ # Delete multiple slots with an already unassigned slot
+ assert_error "ERR Slot 7001 is already unassigned" {R 2 cluster DELSLOTSRANGE 7001 7100 9000 9200}
+ assert_match "* 6553-7000 7101-8000 8501-9828*" [$master3 CLUSTER NODES]
+ assert_match "*6553 7000*7101 8000*8501 9828*" [$master3 CLUSTER SLOTS]
+
+ # Delete multiple slots with a slot specified multiple times
+ assert_error "ERR Slot 12500 specified multiple times" {R 3 cluster DELSLOTSRANGE 12500 12600 12500 12600}
+ assert_match "* 9829-11000 12001-12100 12201-13104*" [$master4 CLUSTER NODES]
+ assert_match "*9829 11000*12001 12100*12201 13104*" [$master4 CLUSTER SLOTS]
+}
+} cluster_allocate_with_continuous_slots_local
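
The hard-coded ranges in cluster_allocate_with_continuous_slots_local split
the 16384 hash slots into five contiguous chunks of roughly 3277 slots each.
A generic even split could be computed like this (its boundaries differ
slightly from the hand-picked ones above):

    set n 5
    set start 0
    for {set i 0} {$i < $n} {incr i} {
        set end [expr {($i + 1) * 16384 / $n - 1}]
        puts "R $i cluster ADDSLOTSRANGE $start $end"
        set start [expr {$end + 1}]
    }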
diff --git a/tests/unit/cluster/scripting.tcl b/tests/unit/cluster/scripting.tcl
new file mode 100644
index 0000000..1ade36e
--- /dev/null
+++ b/tests/unit/cluster/scripting.tcl
@@ -0,0 +1,70 @@
+start_cluster 1 0 {tags {external:skip cluster}} {
+
+ test {Eval scripts with shebangs and functions default to no cross slots} {
+ # Test that scripts with a shebang block cross-slot operations
+ assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {
+ r 0 eval {#!lua
+ redis.call('set', 'foo', 'bar')
+ redis.call('set', 'bar', 'foo')
+ return 'OK'
+ } 0}
+
+ # Test that functions by default block cross-slot operations
+ r 0 function load REPLACE {#!lua name=crossslot
+ local function test_cross_slot(keys, args)
+ redis.call('set', 'foo', 'bar')
+ redis.call('set', 'bar', 'foo')
+ return 'OK'
+ end
+
+ redis.register_function('test_cross_slot', test_cross_slot)}
+ assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {r FCALL test_cross_slot 0}
+ }
+
+ test {Cross slot commands are allowed by default for eval scripts and with allow-cross-slot-keys flag} {
+ # Old-style Lua scripts are allowed to perform cross-slot operations
+ r 0 eval "redis.call('set', 'foo', 'bar'); redis.call('set', 'bar', 'foo')" 0
+
+ # Scripts with the allow-cross-slot-keys flag are allowed
+ r 0 eval {#!lua flags=allow-cross-slot-keys
+ redis.call('set', 'foo', 'bar'); redis.call('set', 'bar', 'foo')
+ } 0
+
+ # Functions with allow-cross-slot-keys flag are allowed
+ r 0 function load REPLACE {#!lua name=crossslot
+ local function test_cross_slot(keys, args)
+ redis.call('set', 'foo', 'bar')
+ redis.call('set', 'bar', 'foo')
+ return 'OK'
+ end
+
+ redis.register_function{function_name='test_cross_slot', callback=test_cross_slot, flags={ 'allow-cross-slot-keys' }}}
+ r FCALL test_cross_slot 0
+ }
+
+ test {Cross slot commands are also blocked if they disagree with pre-declared keys} {
+ assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {
+ r 0 eval {#!lua
+ redis.call('set', 'foo', 'bar')
+ return 'OK'
+ } 1 bar}
+ }
+
+ test "Function no-cluster flag" {
+ R 0 function load {#!lua name=test
+ redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-cluster'}}
+ }
+ catch {R 0 fcall f1 0} e
+ assert_match {*Can not run script on cluster, 'no-cluster' flag is set*} $e
+ }
+
+ test "Script no-cluster flag" {
+ catch {
+ R 0 eval {#!lua flags=no-cluster
+ return 1
+ } 0
+ } e
+
+ assert_match {*Can not run script on cluster, 'no-cluster' flag is set*} $e
+ }
+}
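
The cross-slot rules exercised above hinge on key hash tags: when a key name
contains {...}, only the substring inside the first pair of braces is hashed,
so keys sharing a tag always land in the same slot (this is how "abc{$key}"
in cluster-response-tls.tcl stays on $key's slot). For example, a shebang
script may touch two keys as long as they share a tag:

    # Both keys hash the tag "user1", so they map to the same slot and a
    # shebang script declaring them is allowed to access both.
    R 0 eval {#!lua
        redis.call('set', KEYS[1], 'bar')
        redis.call('set', KEYS[2], 'foo')
        return 'OK'
    } 2 "{user1}:a" "{user1}:b"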
diff --git a/tests/unit/cluster/slot-ownership.tcl b/tests/unit/cluster/slot-ownership.tcl
new file mode 100644
index 0000000..0f3e3cc
--- /dev/null
+++ b/tests/unit/cluster/slot-ownership.tcl
@@ -0,0 +1,61 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+
+ test "Verify that slot ownership transfer through gossip propagates deletes to replicas" {
+ assert {[s -2 role] eq {slave}}
+ wait_for_condition 1000 50 {
+ [s -2 master_link_status] eq {up}
+ } else {
+ fail "Instance #2 master link status is not up"
+ }
+
+ assert {[s -3 role] eq {slave}}
+ wait_for_condition 1000 50 {
+ [s -3 master_link_status] eq {up}
+ } else {
+ fail "Instance #3 master link status is not up"
+ }
+
+ # Set a single key that will be used to test deletion
+ set key "FOO"
+ R 0 SET $key TEST
+ set key_slot [R 0 cluster keyslot $key]
+ set slot_keys_num [R 0 cluster countkeysinslot $key_slot]
+ assert {$slot_keys_num > 0}
+
+ # Wait for replica to have the key
+ R 2 readonly
+ wait_for_condition 1000 50 {
+ [R 2 exists $key] eq "1"
+ } else {
+ fail "Test key was not replicated"
+ }
+
+ assert_equal [R 2 cluster countkeysinslot $key_slot] $slot_keys_num
+
+ # Assert that the other shards in the cluster don't have the key
+ assert_equal [R 1 cluster countkeysinslot $key_slot] "0"
+ assert_equal [R 3 cluster countkeysinslot $key_slot] "0"
+
+ set nodeid [R 1 cluster myid]
+
+ R 1 cluster bumpepoch
+ # Move $key_slot to node 1
+ assert_equal [R 1 cluster setslot $key_slot node $nodeid] "OK"
+
+ wait_for_cluster_propagation
+
+ # src master will delete keys in the slot
+ wait_for_condition 50 100 {
+ [R 0 cluster countkeysinslot $key_slot] eq 0
+ } else {
+ fail "master 'countkeysinslot $key_slot' did not eq 0"
+ }
+
+ # src replica will delete keys in the slot
+ wait_for_condition 50 100 {
+ [R 2 cluster countkeysinslot $key_slot] eq 0
+ } else {
+ fail "replica 'countkeysinslot $key_slot' did not eq 0"
+ }
+ }
+}