authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-14 13:40:54 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-14 13:40:54 +0000
commit317c0644ccf108aa23ef3fd8358bd66c2840bfc0 (patch)
treec417b3d25c86b775989cb5ac042f37611b626c8a /tests
parentInitial commit. (diff)
Adding upstream version 5:7.2.4.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tests')
-rw-r--r-- tests/README.md | 63
-rw-r--r-- tests/assets/corrupt_empty_keys.rdb | bin 0 -> 280 bytes
-rw-r--r-- tests/assets/corrupt_ziplist.rdb | bin 0 -> 1415 bytes
-rw-r--r-- tests/assets/default.conf | 37
-rw-r--r-- tests/assets/encodings.rdb | bin 0 -> 667 bytes
-rw-r--r-- tests/assets/hash-ziplist.rdb | bin 0 -> 137 bytes
-rw-r--r-- tests/assets/hash-zipmap.rdb | bin 0 -> 35 bytes
-rw-r--r-- tests/assets/list-quicklist.rdb | bin 0 -> 123 bytes
-rw-r--r-- tests/assets/minimal.conf | 5
-rw-r--r-- tests/assets/nodefaultuser.acl | 2
-rw-r--r-- tests/assets/rdb-preamble.aof | bin 0 -> 169 bytes
-rw-r--r-- tests/assets/scriptbackup.rdb | bin 0 -> 225 bytes
-rw-r--r-- tests/assets/test_cli_hint_suite.txt | 111
-rw-r--r-- tests/assets/user.acl | 3
-rw-r--r-- tests/assets/userwithselectors.acl | 2
-rw-r--r-- tests/assets/zset-ziplist.rdb | bin 0 -> 135 bytes
-rw-r--r-- tests/cluster/cluster.tcl | 222
-rw-r--r-- tests/cluster/run.tcl | 32
-rw-r--r-- tests/cluster/tests/00-base.tcl | 89
-rw-r--r-- tests/cluster/tests/01-faildet.tcl | 38
-rw-r--r-- tests/cluster/tests/02-failover.tcl | 65
-rw-r--r-- tests/cluster/tests/03-failover-loop.tcl | 117
-rw-r--r-- tests/cluster/tests/04-resharding.tcl | 196
-rw-r--r-- tests/cluster/tests/05-slave-selection.tcl | 188
-rw-r--r-- tests/cluster/tests/06-slave-stop-cond.tcl | 77
-rw-r--r-- tests/cluster/tests/07-replica-migration.tcl | 103
-rw-r--r-- tests/cluster/tests/08-update-msg.tcl | 90
-rw-r--r-- tests/cluster/tests/09-pubsub.tcl | 40
-rw-r--r-- tests/cluster/tests/10-manual-failover.tcl | 192
-rw-r--r-- tests/cluster/tests/11-manual-takeover.tcl | 71
-rw-r--r-- tests/cluster/tests/12-replica-migration-2.tcl | 75
-rw-r--r-- tests/cluster/tests/12.1-replica-migration-3.tcl | 65
-rw-r--r-- tests/cluster/tests/13-no-failover-option.tcl | 61
-rw-r--r-- tests/cluster/tests/14-consistency-check.tcl | 124
-rw-r--r-- tests/cluster/tests/15-cluster-slots.tcl | 128
-rw-r--r-- tests/cluster/tests/16-transactions-on-replica.tcl | 85
-rw-r--r-- tests/cluster/tests/17-diskless-load-swapdb.tcl | 86
-rw-r--r-- tests/cluster/tests/18-info.tcl | 45
-rw-r--r-- tests/cluster/tests/19-cluster-nodes-slots.tcl | 50
-rw-r--r-- tests/cluster/tests/20-half-migrated-slot.tcl | 98
-rw-r--r-- tests/cluster/tests/21-many-slot-migration.tcl | 64
-rw-r--r-- tests/cluster/tests/22-replica-in-sync.tcl | 146
-rw-r--r-- tests/cluster/tests/25-pubsubshard-slot-migration.tcl | 171
-rw-r--r-- tests/cluster/tests/26-pubsubshard.tcl | 94
-rw-r--r-- tests/cluster/tests/28-cluster-shards.tcl | 287
-rw-r--r-- tests/cluster/tests/29-slot-migration-response.tcl | 50
-rw-r--r-- tests/cluster/tests/helpers/onlydots.tcl | 16
-rw-r--r-- tests/cluster/tests/includes/init-tests.tcl | 91
-rw-r--r-- tests/cluster/tests/includes/utils.tcl | 36
-rw-r--r-- tests/cluster/tmp/.gitignore | 2
-rw-r--r-- tests/helpers/bg_block_op.tcl | 55
-rw-r--r-- tests/helpers/bg_complex_data.tcl | 13
-rw-r--r-- tests/helpers/fake_redis_node.tcl | 58
-rw-r--r-- tests/helpers/gen_write_load.tcl | 18
-rw-r--r-- tests/instances.tcl | 742
-rw-r--r-- tests/integration/aof-multi-part.tcl | 1332
-rw-r--r-- tests/integration/aof-race.tcl | 37
-rw-r--r-- tests/integration/aof.tcl | 681
-rw-r--r-- tests/integration/block-repl.tcl | 51
-rw-r--r-- tests/integration/convert-ziplist-hash-on-load.tcl | 28
-rw-r--r-- tests/integration/convert-ziplist-zset-on-load.tcl | 28
-rw-r--r-- tests/integration/convert-zipmap-hash-on-load.tcl | 39
-rw-r--r-- tests/integration/corrupt-dump-fuzzer.tcl | 230
-rw-r--r-- tests/integration/corrupt-dump.tcl | 833
-rw-r--r-- tests/integration/dismiss-mem.tcl | 101
-rw-r--r-- tests/integration/failover.tcl | 294
-rw-r--r-- tests/integration/logging.tcl | 61
-rw-r--r-- tests/integration/psync2-master-restart.tcl | 218
-rw-r--r-- tests/integration/psync2-pingoff.tcl | 250
-rw-r--r-- tests/integration/psync2-reg.tcl | 82
-rw-r--r-- tests/integration/psync2.tcl | 384
-rw-r--r-- tests/integration/rdb.tcl | 419
-rw-r--r-- tests/integration/redis-benchmark.tcl | 171
-rw-r--r-- tests/integration/redis-cli.tcl | 609
-rw-r--r-- tests/integration/replication-2.tcl | 93
-rw-r--r-- tests/integration/replication-3.tcl | 130
-rw-r--r-- tests/integration/replication-4.tcl | 295
-rw-r--r-- tests/integration/replication-buffer.tcl | 307
-rw-r--r-- tests/integration/replication-psync.tcl | 143
-rw-r--r-- tests/integration/replication.tcl | 1456
-rw-r--r-- tests/integration/shutdown.tcl | 234
-rw-r--r-- tests/modules/Makefile | 83
-rw-r--r-- tests/modules/aclcheck.c | 269
-rw-r--r-- tests/modules/auth.c | 270
-rw-r--r-- tests/modules/basics.c | 1052
-rw-r--r-- tests/modules/blockedclient.c | 712
-rw-r--r-- tests/modules/blockonbackground.c | 295
-rw-r--r-- tests/modules/blockonkeys.c | 645
-rw-r--r-- tests/modules/cmdintrospection.c | 158
-rw-r--r-- tests/modules/commandfilter.c | 251
-rw-r--r-- tests/modules/datatype.c | 314
-rw-r--r-- tests/modules/datatype2.c | 739
-rw-r--r-- tests/modules/defragtest.c | 235
-rw-r--r-- tests/modules/eventloop.c | 276
-rw-r--r-- tests/modules/fork.c | 96
-rw-r--r-- tests/modules/getchannels.c | 69
-rw-r--r-- tests/modules/getkeys.c | 178
-rw-r--r-- tests/modules/hash.c | 90
-rw-r--r-- tests/modules/hooks.c | 516
-rw-r--r-- tests/modules/infotest.c | 119
-rw-r--r-- tests/modules/keyspace_events.c | 440
-rw-r--r-- tests/modules/keyspecs.c | 236
-rw-r--r-- tests/modules/list.c | 252
-rw-r--r-- tests/modules/mallocsize.c | 237
-rw-r--r-- tests/modules/misc.c | 571
-rw-r--r-- tests/modules/moduleauthtwo.c | 43
-rw-r--r-- tests/modules/moduleconfigs.c | 195
-rw-r--r-- tests/modules/moduleconfigstwo.c | 39
-rw-r--r-- tests/modules/postnotifications.c | 303
-rw-r--r-- tests/modules/propagate.c | 403
-rw-r--r-- tests/modules/publish.c | 57
-rw-r--r-- tests/modules/rdbloadsave.c | 162
-rw-r--r-- tests/modules/reply.c | 214
-rw-r--r-- tests/modules/scan.c | 121
-rw-r--r-- tests/modules/stream.c | 258
-rw-r--r-- tests/modules/subcommands.c | 112
-rw-r--r-- tests/modules/test_lazyfree.c | 196
-rw-r--r-- tests/modules/testrdb.c | 405
-rw-r--r-- tests/modules/timer.c | 102
-rw-r--r-- tests/modules/usercall.c | 228
-rw-r--r-- tests/modules/zset.c | 91
-rw-r--r-- tests/sentinel/run.tcl | 36
-rw-r--r-- tests/sentinel/tests/00-base.tcl | 210
-rw-r--r-- tests/sentinel/tests/01-conf-update.tcl | 50
-rw-r--r-- tests/sentinel/tests/02-slaves-reconf.tcl | 91
-rw-r--r-- tests/sentinel/tests/03-runtime-reconf.tcl | 225
-rw-r--r-- tests/sentinel/tests/04-slave-selection.tcl | 5
-rw-r--r-- tests/sentinel/tests/05-manual.tcl | 94
-rw-r--r-- tests/sentinel/tests/06-ckquorum.tcl | 42
-rw-r--r-- tests/sentinel/tests/07-down-conditions.tcl | 104
-rw-r--r-- tests/sentinel/tests/08-hostname-conf.tcl | 69
-rw-r--r-- tests/sentinel/tests/09-acl-support.tcl | 56
-rw-r--r-- tests/sentinel/tests/10-replica-priority.tcl | 76
-rw-r--r-- tests/sentinel/tests/11-port-0.tcl | 33
-rw-r--r-- tests/sentinel/tests/12-master-reboot.tcl | 103
-rw-r--r-- tests/sentinel/tests/13-info-command.tcl | 47
-rw-r--r-- tests/sentinel/tests/14-debug-command.tcl | 9
-rw-r--r-- tests/sentinel/tests/15-config-set-config-get.tcl | 58
-rwxr-xr-x tests/sentinel/tests/helpers/check_leaked_fds.tcl | 79
-rw-r--r-- tests/sentinel/tests/includes/init-tests.tcl | 63
-rw-r--r-- tests/sentinel/tests/includes/sentinel.conf | 9
-rw-r--r-- tests/sentinel/tests/includes/start-init-tests.tcl | 18
-rw-r--r-- tests/sentinel/tests/includes/utils.tcl | 22
-rw-r--r-- tests/sentinel/tmp/.gitignore | 2
-rw-r--r-- tests/support/aofmanifest.tcl | 169
-rw-r--r-- tests/support/benchmark.tcl | 33
-rw-r--r-- tests/support/cli.tcl | 36
-rw-r--r-- tests/support/cluster.tcl | 367
-rw-r--r-- tests/support/cluster_util.tcl | 201
-rw-r--r-- tests/support/redis.tcl | 466
-rw-r--r-- tests/support/response_transformers.tcl | 105
-rw-r--r-- tests/support/server.tcl | 789
-rw-r--r-- tests/support/test.tcl | 267
-rw-r--r-- tests/support/tmpfile.tcl | 15
-rw-r--r-- tests/support/util.tcl | 1117
-rw-r--r-- tests/test_helper.tcl | 937
-rw-r--r-- tests/tmp/.gitignore | 1
-rw-r--r-- tests/unit/acl-v2.tcl | 525
-rw-r--r-- tests/unit/acl.tcl | 1173
-rw-r--r-- tests/unit/aofrw.tcl | 232
-rw-r--r-- tests/unit/auth.tcl | 89
-rw-r--r-- tests/unit/bitfield.tcl | 263
-rw-r--r-- tests/unit/bitops.tcl | 593
-rw-r--r-- tests/unit/client-eviction.tcl | 586
-rw-r--r-- tests/unit/cluster/announced-endpoints.tcl | 42
-rw-r--r-- tests/unit/cluster/cli.tcl | 416
-rw-r--r-- tests/unit/cluster/cluster-response-tls.tcl | 110
-rw-r--r-- tests/unit/cluster/hostnames.tcl | 203
-rw-r--r-- tests/unit/cluster/human-announced-nodename.tcl | 29
-rw-r--r-- tests/unit/cluster/links.tcl | 292
-rw-r--r-- tests/unit/cluster/misc.tcl | 26
-rw-r--r-- tests/unit/cluster/multi-slot-operations.tcl | 109
-rw-r--r-- tests/unit/cluster/scripting.tcl | 70
-rw-r--r-- tests/unit/cluster/slot-ownership.tcl | 61
-rw-r--r-- tests/unit/dump.tcl | 410
-rw-r--r-- tests/unit/expire.tcl | 835
-rw-r--r-- tests/unit/functions.tcl | 1233
-rw-r--r-- tests/unit/geo.tcl | 768
-rw-r--r-- tests/unit/hyperloglog.tcl | 271
-rw-r--r-- tests/unit/info-command.tcl | 62
-rw-r--r-- tests/unit/info.tcl | 346
-rw-r--r-- tests/unit/introspection-2.tcl | 245
-rw-r--r-- tests/unit/introspection.tcl | 829
-rw-r--r-- tests/unit/keyspace.tcl | 502
-rw-r--r-- tests/unit/latency-monitor.tcl | 166
-rw-r--r-- tests/unit/lazyfree.tcl | 90
-rw-r--r-- tests/unit/limits.tcl | 21
-rw-r--r-- tests/unit/maxmemory.tcl | 590
-rw-r--r-- tests/unit/memefficiency.tcl | 580
-rw-r--r-- tests/unit/moduleapi/aclcheck.tcl | 137
-rw-r--r-- tests/unit/moduleapi/async_rm_call.tcl | 437
-rw-r--r-- tests/unit/moduleapi/auth.tcl | 90
-rw-r--r-- tests/unit/moduleapi/basics.tcl | 46
-rw-r--r-- tests/unit/moduleapi/blockedclient.tcl | 287
-rw-r--r-- tests/unit/moduleapi/blockonbackground.tcl | 126
-rw-r--r-- tests/unit/moduleapi/blockonkeys.tcl | 366
-rw-r--r-- tests/unit/moduleapi/cluster.tcl | 222
-rw-r--r-- tests/unit/moduleapi/cmdintrospection.tcl | 50
-rw-r--r-- tests/unit/moduleapi/commandfilter.tcl | 175
-rw-r--r-- tests/unit/moduleapi/datatype.tcl | 134
-rw-r--r-- tests/unit/moduleapi/datatype2.tcl | 232
-rw-r--r-- tests/unit/moduleapi/defrag.tcl | 46
-rw-r--r-- tests/unit/moduleapi/eventloop.tcl | 28
-rw-r--r-- tests/unit/moduleapi/fork.tcl | 49
-rw-r--r-- tests/unit/moduleapi/getchannels.tcl | 40
-rw-r--r-- tests/unit/moduleapi/getkeys.tcl | 80
-rw-r--r-- tests/unit/moduleapi/hash.tcl | 27
-rw-r--r-- tests/unit/moduleapi/hooks.tcl | 321
-rw-r--r-- tests/unit/moduleapi/infotest.tcl | 131
-rw-r--r-- tests/unit/moduleapi/infra.tcl | 25
-rw-r--r-- tests/unit/moduleapi/keyspace_events.tcl | 118
-rw-r--r-- tests/unit/moduleapi/keyspecs.tcl | 160
-rw-r--r-- tests/unit/moduleapi/list.tcl | 160
-rw-r--r-- tests/unit/moduleapi/mallocsize.tcl | 21
-rw-r--r-- tests/unit/moduleapi/misc.tcl | 555
-rw-r--r-- tests/unit/moduleapi/moduleauth.tcl | 405
-rw-r--r-- tests/unit/moduleapi/moduleconfigs.tcl | 247
-rw-r--r-- tests/unit/moduleapi/postnotifications.tcl | 219
-rw-r--r-- tests/unit/moduleapi/propagate.tcl | 763
-rw-r--r-- tests/unit/moduleapi/publish.tcl | 34
-rw-r--r-- tests/unit/moduleapi/rdbloadsave.tcl | 200
-rw-r--r-- tests/unit/moduleapi/reply.tcl | 152
-rw-r--r-- tests/unit/moduleapi/scan.tcl | 69
-rw-r--r-- tests/unit/moduleapi/stream.tcl | 176
-rw-r--r-- tests/unit/moduleapi/subcommands.tcl | 57
-rw-r--r-- tests/unit/moduleapi/test_lazyfree.tcl | 32
-rw-r--r-- tests/unit/moduleapi/testrdb.tcl | 306
-rw-r--r-- tests/unit/moduleapi/timer.tcl | 99
-rw-r--r-- tests/unit/moduleapi/usercall.tcl | 136
-rw-r--r-- tests/unit/moduleapi/zset.tcl | 40
-rw-r--r-- tests/unit/multi.tcl | 923
-rw-r--r-- tests/unit/networking.tcl | 172
-rw-r--r-- tests/unit/obuf-limits.tcl | 230
-rw-r--r-- tests/unit/oom-score-adj.tcl | 131
-rw-r--r-- tests/unit/other.tcl | 428
-rw-r--r-- tests/unit/pause.tcl | 364
-rw-r--r-- tests/unit/printver.tcl | 6
-rw-r--r-- tests/unit/protocol.tcl | 250
-rw-r--r-- tests/unit/pubsub.tcl | 506
-rw-r--r-- tests/unit/pubsubshard.tcl | 164
-rw-r--r-- tests/unit/querybuf.tcl | 96
-rw-r--r-- tests/unit/quit.tcl | 33
-rw-r--r-- tests/unit/replybufsize.tcl | 47
-rw-r--r-- tests/unit/scan.tcl | 433
-rw-r--r-- tests/unit/scripting.tcl | 2213
-rw-r--r-- tests/unit/shutdown.tcl | 133
-rw-r--r-- tests/unit/slowlog.tcl | 228
-rw-r--r-- tests/unit/sort.tcl | 359
-rw-r--r-- tests/unit/tls.tcl | 158
-rw-r--r-- tests/unit/tracking.tcl | 902
-rw-r--r-- tests/unit/type/hash.tcl | 846
-rw-r--r-- tests/unit/type/incr.tcl | 214
-rw-r--r-- tests/unit/type/list-2.tcl | 47
-rw-r--r-- tests/unit/type/list-3.tcl | 232
-rw-r--r-- tests/unit/type/list-common.tcl | 4
-rw-r--r-- tests/unit/type/list.tcl | 2363
-rw-r--r-- tests/unit/type/set.tcl | 1305
-rw-r--r-- tests/unit/type/stream-cgroups.tcl | 1297
-rw-r--r-- tests/unit/type/stream.tcl | 940
-rw-r--r-- tests/unit/type/string.tcl | 674
-rw-r--r-- tests/unit/type/zset.tcl | 2654
-rw-r--r-- tests/unit/violations.tcl | 103
-rw-r--r-- tests/unit/wait.tcl | 505
263 files changed, 67670 insertions, 0 deletions
diff --git a/tests/README.md b/tests/README.md
new file mode 100644
index 0000000..1aa98dc
--- /dev/null
+++ b/tests/README.md
@@ -0,0 +1,63 @@
+Redis Test Suite
+================
+
+The normal execution mode of the test suite involves starting and manipulating
+local `redis-server` instances, inspecting process state, log files, etc.
+
+The test suite also supports execution against an external server, which is
+enabled using the `--host` and `--port` parameters. When executing against an
+external server, tests tagged `external:skip` are skipped.
+
+There are additional runtime options that can further adjust the test suite to
+match different external server configurations:
+
+| Option | Impact |
+| -------------------- | -------------------------------------------------------- |
+| `--singledb` | Only use database 0, don't assume others are supported. |
+| `--ignore-encoding`  | Skip all checks for specific encodings.  |
+| `--ignore-digest` | Skip key value digest validations. |
+| `--cluster-mode` | Run in strict Redis Cluster compatibility mode. |
+| `--large-memory`     | Enable tests that consume more than 100 MB of memory. |
+
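+For example, a hypothetical invocation against an external server (the
+address and port below are placeholders) might look like:
+
+    ./runtest --host 10.0.0.1 --port 7000 --singledb --ignore-digest
+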
+Tags
+----
+
+Tags are applied to tests to classify them according to the subsystem they test,
+but also to indicate compatibility with different run modes and required
+capabilities.
+
+Tags can be applied at different context levels (see the sketch below):
+* `start_server` context
+* `tags` context that bundles several tests together
+* A single test context.
+
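+For example, the following sketch (the test body and the tag names chosen
+here are illustrative, not taken from a real test file) applies tags at all
+three levels:
+
+    start_server {tags {"repl" "external:skip"}} {
+        tags {"slow"} {
+            test "An example test" {
+                r ping
+            } {PONG} {needs:debug}
+        }
+    }
+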
+The following compatibility and capability tags are currently used:
+
+| Tag | Indicates |
+| --------------------- | --------- |
+| `external:skip` | Not compatible with external servers. |
+| `cluster:skip` | Not compatible with `--cluster-mode`. |
+| `large-memory`        | Requires more than 100 MB of memory. |
+| `tls:skip` | Not compatible with `--tls`. |
+| `needs:repl` | Uses replication and needs to be able to `SYNC` from server. |
+| `needs:debug` | Uses the `DEBUG` command or other debugging focused commands (like `OBJECT REFCOUNT`). |
+| `needs:pfdebug` | Uses the `PFDEBUG` command. |
+| `needs:config-maxmemory` | Uses `CONFIG SET` to manipulate memory limit, eviction policies, etc. |
+| `needs:config-resetstat` | Uses `CONFIG RESETSTAT` to reset statistics. |
+| `needs:reset` | Uses `RESET` to reset client connections. |
+| `needs:save` | Uses `SAVE` or `BGSAVE` to create an RDB file. |
+
+When using an external server (`--host` and `--port`), filtering using the
+`external:skip` tag is done automatically.
+
+When using `--cluster-mode`, filtering using the `cluster:skip` tag is done
+automatically.
+
+When not using `--large-memory`, tests tagged `large-memory` are skipped
+automatically.
+
+In addition, it is possible to exclude more tags explicitly. For example, to
+run tests on a server that does not permit `SYNC` use:
+
+ ./runtest --host <host> --port <port> --tags -needs:repl
+
diff --git a/tests/assets/corrupt_empty_keys.rdb b/tests/assets/corrupt_empty_keys.rdb
new file mode 100644
index 0000000..98b6a14
--- /dev/null
+++ b/tests/assets/corrupt_empty_keys.rdb
Binary files differ
diff --git a/tests/assets/corrupt_ziplist.rdb b/tests/assets/corrupt_ziplist.rdb
new file mode 100644
index 0000000..b40ada8
--- /dev/null
+++ b/tests/assets/corrupt_ziplist.rdb
Binary files differ
diff --git a/tests/assets/default.conf b/tests/assets/default.conf
new file mode 100644
index 0000000..de460cc
--- /dev/null
+++ b/tests/assets/default.conf
@@ -0,0 +1,37 @@
+# Redis configuration for testing.
+
+always-show-logo yes
+notify-keyspace-events KEA
+daemonize no
+pidfile /var/run/redis.pid
+port 6379
+timeout 0
+bind 127.0.0.1
+loglevel verbose
+logfile ''
+databases 16
+latency-monitor-threshold 1
+repl-diskless-sync-delay 0
+
+# Turn off RDB by default (to speed up tests)
+# Note: the infrastructure in server.tcl uses a dict, so we can't provide several save directives
+save ''
+
+rdbcompression yes
+dbfilename dump.rdb
+dir ./
+
+slave-serve-stale-data yes
+appendonly no
+appendfsync everysec
+no-appendfsync-on-rewrite no
+activerehashing yes
+
+enable-protected-configs yes
+enable-debug-command yes
+enable-module-command yes
+
+propagation-error-behavior panic
+
+# Make sure shutdown doesn't fail if there's an initial AOFRW
+shutdown-on-sigterm force
diff --git a/tests/assets/encodings.rdb b/tests/assets/encodings.rdb
new file mode 100644
index 0000000..9fd9b70
--- /dev/null
+++ b/tests/assets/encodings.rdb
Binary files differ
diff --git a/tests/assets/hash-ziplist.rdb b/tests/assets/hash-ziplist.rdb
new file mode 100644
index 0000000..bcc39a3
--- /dev/null
+++ b/tests/assets/hash-ziplist.rdb
Binary files differ
diff --git a/tests/assets/hash-zipmap.rdb b/tests/assets/hash-zipmap.rdb
new file mode 100644
index 0000000..27a42ed
--- /dev/null
+++ b/tests/assets/hash-zipmap.rdb
Binary files differ
diff --git a/tests/assets/list-quicklist.rdb b/tests/assets/list-quicklist.rdb
new file mode 100644
index 0000000..a9101a1
--- /dev/null
+++ b/tests/assets/list-quicklist.rdb
Binary files differ
diff --git a/tests/assets/minimal.conf b/tests/assets/minimal.conf
new file mode 100644
index 0000000..ae14ae8
--- /dev/null
+++ b/tests/assets/minimal.conf
@@ -0,0 +1,5 @@
+# Minimal configuration for testing.
+always-show-logo yes
+daemonize no
+pidfile /var/run/redis.pid
+loglevel verbose
diff --git a/tests/assets/nodefaultuser.acl b/tests/assets/nodefaultuser.acl
new file mode 100644
index 0000000..2557c7f
--- /dev/null
+++ b/tests/assets/nodefaultuser.acl
@@ -0,0 +1,2 @@
+user alice on nopass ~* +@all
+user bob on nopass ~* &* +@all
\ No newline at end of file
diff --git a/tests/assets/rdb-preamble.aof b/tests/assets/rdb-preamble.aof
new file mode 100644
index 0000000..73f2301
--- /dev/null
+++ b/tests/assets/rdb-preamble.aof
Binary files differ
diff --git a/tests/assets/scriptbackup.rdb b/tests/assets/scriptbackup.rdb
new file mode 100644
index 0000000..963715d
--- /dev/null
+++ b/tests/assets/scriptbackup.rdb
Binary files differ
diff --git a/tests/assets/test_cli_hint_suite.txt b/tests/assets/test_cli_hint_suite.txt
new file mode 100644
index 0000000..18c1fe0
--- /dev/null
+++ b/tests/assets/test_cli_hint_suite.txt
@@ -0,0 +1,111 @@
+# Test suite for redis-cli command-line hinting mechanism.
+# Each test case consists of two strings: a (partial) input command line, and the expected hint string.
+
+# Command with one arg: GET key
+"GET " "key"
+"GET abc " ""
+
+# Command with two args: DECRBY key decrement
+"DECRBY xyz 2 " ""
+"DECRBY xyz " "decrement"
+"DECRBY " "key decrement"
+
+# Command with optional arg: LPOP key [count]
+"LPOP key " "[count]"
+"LPOP key 3 " ""
+
+# Command with optional token arg: XRANGE key start end [COUNT count]
+"XRANGE " "key start end [COUNT count]"
+"XRANGE k 4 2 " "[COUNT count]"
+"XRANGE k 4 2 COU" "[COUNT count]"
+"XRANGE k 4 2 COUNT" "[COUNT count]"
+"XRANGE k 4 2 COUNT " "count"
+
+# Command with optional token block arg: BITFIELD_RO key [GET encoding offset [GET encoding offset ...]]
+"BITFIELD_RO k " "[GET encoding offset [GET encoding offset ...]]"
+"BITFIELD_RO k GE" "[GET encoding offset [GET encoding offset ...]]"
+"BITFIELD_RO k GET" "[GET encoding offset [GET encoding offset ...]]"
+# TODO: The following hints end with an unbalanced "]" which shouldn't be there.
+"BITFIELD_RO k GET " "encoding offset [GET encoding offset ...]]"
+"BITFIELD_RO k GET xyz " "offset [GET encoding offset ...]]"
+"BITFIELD_RO k GET xyz 12 " "[GET encoding offset ...]]"
+"BITFIELD_RO k GET xyz 12 GET " "encoding offset [GET encoding offset ...]]"
+"BITFIELD_RO k GET enc1 12 GET enc2 " "offset [GET encoding offset ...]]"
+"BITFIELD_RO k GET enc1 12 GET enc2 34 " "[GET encoding offset ...]]"
+
+# Two-word command with multiple non-token block args: CONFIG SET parameter value [parameter value ...]
+"CONFIG SET param " "value [parameter value ...]"
+"CONFIG SET param val " "[parameter value ...]"
+"CONFIG SET param val parm2 val2 " "[parameter value ...]"
+
+# Command with nested optional args: ZRANDMEMBER key [count [WITHSCORES]]
+"ZRANDMEMBER k " "[count [WITHSCORES]]"
+"ZRANDMEMBER k 3 " "[WITHSCORES]"
+"ZRANDMEMBER k 3 WI" "[WITHSCORES]"
+"ZRANDMEMBER k 3 WITHSCORES " ""
+# Wrong data type: count must be an integer. Hinting fails.
+"ZRANDMEMBER k cnt " ""
+
+# Command ends with repeated arg: MGET key [key ...]
+"MGET " "key [key ...]"
+"MGET k " "[key ...]"
+"MGET k k " "[key ...]"
+
+# Optional args can be in any order: SCAN cursor [MATCH pattern] [COUNT count] [TYPE type]
+"SCAN 2 MATCH " "pattern [COUNT count] [TYPE type]"
+"SCAN 2 COUNT " "count [MATCH pattern] [TYPE type]"
+
+# One-of choices: BLMOVE source destination LEFT|RIGHT LEFT|RIGHT timeout
+"BLMOVE src dst LEFT " "LEFT|RIGHT timeout"
+
+# Optional args can be in any order: ZRANGE key min max [BYSCORE|BYLEX] [REV] [LIMIT offset count] [WITHSCORES]
+"ZRANGE k 1 2 " "[BYSCORE|BYLEX] [REV] [LIMIT offset count] [WITHSCORES]"
+"ZRANGE k 1 2 bylex " "[REV] [LIMIT offset count] [WITHSCORES]"
+"ZRANGE k 1 2 bylex rev " "[LIMIT offset count] [WITHSCORES]"
+"ZRANGE k 1 2 limit 2 4 " "[BYSCORE|BYLEX] [REV] [WITHSCORES]"
+"ZRANGE k 1 2 bylex rev limit 2 4 WITHSCORES " ""
+"ZRANGE k 1 2 rev " "[BYSCORE|BYLEX] [LIMIT offset count] [WITHSCORES]"
+"ZRANGE k 1 2 WITHSCORES " "[BYSCORE|BYLEX] [REV] [LIMIT offset count]"
+
+# Optional one-of args with parameters: SET key value [NX|XX] [GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]
+"SET key value " "[NX|XX] [GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]"
+"SET key value EX" "[NX|XX] [GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]"
+"SET key value EX " "seconds [NX|XX] [GET]"
+"SET key value EX 23 " "[NX|XX] [GET]"
+"SET key value EXAT" "[NX|XX] [GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]"
+"SET key value EXAT " "unix-time-seconds [NX|XX] [GET]"
+"SET key value PX" "[NX|XX] [GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]"
+"SET key value PX " "milliseconds [NX|XX] [GET]"
+"SET key value PXAT" "[NX|XX] [GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]"
+"SET key value PXAT " "unix-time-milliseconds [NX|XX] [GET]"
+"SET key value KEEPTTL " "[NX|XX] [GET]"
+"SET key value XX " "[GET] [EX seconds|PX milliseconds|EXAT unix-time-seconds|PXAT unix-time-milliseconds|KEEPTTL]"
+
+# If an input word can't be matched, stop hinting.
+"SET key value FOOBAR " ""
+# Incorrect type for EX 'seconds' parameter - stop hinting.
+"SET key value EX sec " ""
+
+# Reordering partially-matched optional argument: GEORADIUS key longitude latitude radius M|KM|FT|MI [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count [ANY]] [ASC|DESC] [STORE key|STOREDIST key]
+"GEORADIUS key " "longitude latitude radius M|KM|FT|MI [WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count [ANY]] [ASC|DESC] [STORE key|STOREDIST key]"
+"GEORADIUS key 1 2 3 M " "[WITHCOORD] [WITHDIST] [WITHHASH] [COUNT count [ANY]] [ASC|DESC] [STORE key|STOREDIST key]"
+"GEORADIUS key 1 2 3 M COUNT " "count [ANY] [WITHCOORD] [WITHDIST] [WITHHASH] [ASC|DESC] [STORE key|STOREDIST key]"
+"GEORADIUS key 1 2 3 M COUNT 12 " "[ANY] [WITHCOORD] [WITHDIST] [WITHHASH] [ASC|DESC] [STORE key|STOREDIST key]"
+"GEORADIUS key 1 2 3 M COUNT 12 " "[ANY] [WITHCOORD] [WITHDIST] [WITHHASH] [ASC|DESC] [STORE key|STOREDIST key]"
+"GEORADIUS key 1 -2.345 3 M COUNT 12 " "[ANY] [WITHCOORD] [WITHDIST] [WITHHASH] [ASC|DESC] [STORE key|STOREDIST key]"" ""
+# Wrong data type: latitude must be a double. Hinting fails.
+"GEORADIUS key 1 X " ""
+# Once the next optional argument is started, the [ANY] hint completing the COUNT argument disappears.
+"GEORADIUS key 1 2 3 M COUNT 12 ASC " "[WITHCOORD] [WITHDIST] [WITHHASH] [STORE key|STOREDIST key]"
+
+# Incorrect argument type for double-valued token parameter.
+"GEOSEARCH k FROMLONLAT " "longitude latitude BYRADIUS radius M|KM|FT|MI|BYBOX width height M|KM|FT|MI [ASC|DESC] [COUNT count [ANY]] [WITHCOORD] [WITHDIST] [WITHHASH]"
+"GEOSEARCH k FROMLONLAT 2.34 4.45 BYRADIUS badvalue " ""
+
+# Optional parameters followed by mandatory params: ZADD key [NX|XX] [GT|LT] [CH] [INCR] score member [score member ...]
+"ZADD key " "[NX|XX] [GT|LT] [CH] [INCR] score member [score member ...]"
+"ZADD key CH LT " "[NX|XX] [INCR] score member [score member ...]"
+"ZADD key 0 " "member [score member ...]"
+
+# Empty-valued token argument represented as a pair of double-quotes.
+"MIGRATE " "host port key|\"\" destination-db timeout [COPY] [REPLACE] [AUTH password|AUTH2 username password] [KEYS key [key ...]]"
diff --git a/tests/assets/user.acl b/tests/assets/user.acl
new file mode 100644
index 0000000..926ac54
--- /dev/null
+++ b/tests/assets/user.acl
@@ -0,0 +1,3 @@
+user alice on allcommands allkeys &* >alice
+user bob on -@all +@set +acl ~set* &* >bob
+user default on nopass ~* &* +@all
diff --git a/tests/assets/userwithselectors.acl b/tests/assets/userwithselectors.acl
new file mode 100644
index 0000000..5d42957
--- /dev/null
+++ b/tests/assets/userwithselectors.acl
@@ -0,0 +1,2 @@
+user alice on (+get ~rw*)
+user bob on (+set %W~w*) (+get %R~r*)
\ No newline at end of file
diff --git a/tests/assets/zset-ziplist.rdb b/tests/assets/zset-ziplist.rdb
new file mode 100644
index 0000000..d554947
--- /dev/null
+++ b/tests/assets/zset-ziplist.rdb
Binary files differ
diff --git a/tests/cluster/cluster.tcl b/tests/cluster/cluster.tcl
new file mode 100644
index 0000000..9931eac
--- /dev/null
+++ b/tests/cluster/cluster.tcl
@@ -0,0 +1,222 @@
+# Cluster-specific test functions.
+#
+# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
+# This software is released under the BSD License. See the COPYING file for
+# more information.
+
+# Track cluster configuration as created by create_cluster below
+set ::cluster_master_nodes 0
+set ::cluster_replica_nodes 0
+
+# Return the parsed CLUSTER NODES output as a list of dictionaries. An
+# optional status filter can be specified to return only the entries that
+# match the provided status.
+proc get_cluster_nodes {id {status "*"}} {
+ set lines [split [R $id cluster nodes] "\r\n"]
+ set nodes {}
+ foreach l $lines {
+ set l [string trim $l]
+ if {$l eq {}} continue
+ set args [split $l]
+ set node [dict create \
+ id [lindex $args 0] \
+ addr [lindex $args 1] \
+ flags [split [lindex $args 2] ,] \
+ slaveof [lindex $args 3] \
+ ping_sent [lindex $args 4] \
+ pong_recv [lindex $args 5] \
+ config_epoch [lindex $args 6] \
+ linkstate [lindex $args 7] \
+ slots [lrange $args 8 end] \
+ ]
+ if {[string match $status [lindex $args 7]]} {
+ lappend nodes $node
+ }
+ }
+ return $nodes
+}
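+# Example usage (illustrative): count the nodes that instance 0 currently
+# sees in the "connected" link state.
+#
+#   set n [llength [get_cluster_nodes 0 "connected"]]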
+
+# Test whether a node has the specified flag set.
+proc has_flag {node flag} {
+ expr {[lsearch -exact [dict get $node flags] $flag] != -1}
+}
+
+# Returns the parsed myself node entry as a dictionary.
+proc get_myself id {
+ set nodes [get_cluster_nodes $id]
+ foreach n $nodes {
+ if {[has_flag $n myself]} {return $n}
+ }
+ return {}
+}
+
+# Get a specific node by ID, parsing the CLUSTER NODES output
+# of the instance with number 'instance_id'.
+proc get_node_by_id {instance_id node_id} {
+ set nodes [get_cluster_nodes $instance_id]
+ foreach n $nodes {
+ if {[dict get $n id] eq $node_id} {return $n}
+ }
+ return {}
+}
+
+# Return the value of the specified CLUSTER INFO field.
+proc CI {n field} {
+ get_info_field [R $n cluster info] $field
+}
+
+# Return the value of the specified INFO field.
+proc s {n field} {
+ get_info_field [R $n info] $field
+}
+
+# Assuming the nodes are reset, this function performs slot allocation.
+# Only the first 'n' nodes are used.
+proc cluster_allocate_slots {n} {
+ set slot 16383
+ while {$slot >= 0} {
+ # Allocate successive slots to random nodes.
+ set node [randomInt $n]
+ lappend slots_$node $slot
+ incr slot -1
+ }
+ for {set j 0} {$j < $n} {incr j} {
+ R $j cluster addslots {*}[set slots_${j}]
+ }
+}
+
+# Check that cluster nodes agree about "state", or raise an error.
+proc assert_cluster_state {state} {
+ foreach_redis_id id {
+ if {[instance_is_killed redis $id]} continue
+ wait_for_condition 1000 50 {
+ [CI $id cluster_state] eq $state
+ } else {
+ fail "Cluster node $id cluster_state:[CI $id cluster_state]"
+ }
+ }
+}
+
+# Search for the first node, starting from ID $first, that is not
+# already configured as a slave.
+proc cluster_find_available_slave {first} {
+ foreach_redis_id id {
+ if {$id < $first} continue
+ if {[instance_is_killed redis $id]} continue
+ set me [get_myself $id]
+ if {[dict get $me slaveof] eq {-}} {return $id}
+ }
+ fail "No available slaves"
+}
+
+# Add 'slaves' slaves to a cluster composed of 'masters' masters.
+# It assumes that masters are allocated sequentially from instance ID 0
+# to N-1.
+proc cluster_allocate_slaves {masters slaves} {
+ for {set j 0} {$j < $slaves} {incr j} {
+ set master_id [expr {$j % $masters}]
+ set slave_id [cluster_find_available_slave $masters]
+ set master_myself [get_myself $master_id]
+ R $slave_id cluster replicate [dict get $master_myself id]
+ }
+}
+
+# Create a cluster composed of the specified number of masters and slaves.
+proc create_cluster {masters slaves} {
+ cluster_allocate_slots $masters
+ if {$slaves} {
+ cluster_allocate_slaves $masters $slaves
+ }
+ assert_cluster_state ok
+
+ set ::cluster_master_nodes $masters
+ set ::cluster_replica_nodes $slaves
+}
+
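+# Allocate roughly 16384/n successive slots to each of the first 'n' nodes,
+# so that every node owns a single contiguous range. For example
+# (illustrative), with n=3 node 0 gets slots 0-5460, node 1 gets 5461-10921
+# and node 2 gets 10922-16383.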
+proc cluster_allocate_with_continuous_slots {n} {
+ set slot 16383
+ set avg [expr ($slot+1) / $n]
+ while {$slot >= 0} {
+ set node [expr $slot/$avg >= $n ? $n-1 : $slot/$avg]
+ lappend slots_$node $slot
+ incr slot -1
+ }
+ for {set j 0} {$j < $n} {incr j} {
+ R $j cluster addslots {*}[set slots_${j}]
+ }
+}
+
+# Create a cluster composed of the specified number of masters and slaves,
+# but with a continuous slot range.
+proc cluster_create_with_continuous_slots {masters slaves} {
+ cluster_allocate_with_continuous_slots $masters
+ if {$slaves} {
+ cluster_allocate_slaves $masters $slaves
+ }
+ assert_cluster_state ok
+
+ set ::cluster_master_nodes $masters
+ set ::cluster_replica_nodes $slaves
+}
+
+
+# Set the cluster node-timeout on all the reachable nodes.
+proc set_cluster_node_timeout {to} {
+ foreach_redis_id id {
+ catch {R $id CONFIG SET cluster-node-timeout $to}
+ }
+}
+
+# Check if the cluster is writable and readable. Use node "id"
+# as a starting point to talk with the cluster.
+proc cluster_write_test {id} {
+ set prefix [randstring 20 20 alpha]
+ set port [get_instance_attrib redis $id port]
+ set cluster [redis_cluster 127.0.0.1:$port]
+ for {set j 0} {$j < 100} {incr j} {
+ $cluster set key.$j $prefix.$j
+ }
+ for {set j 0} {$j < 100} {incr j} {
+ assert {[$cluster get key.$j] eq "$prefix.$j"}
+ }
+ $cluster close
+}
+
+# Check if cluster configuration is consistent.
+proc cluster_config_consistent {} {
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ if {$j == 0} {
+ set base_cfg [R $j cluster slots]
+ } else {
+ set cfg [R $j cluster slots]
+ if {$cfg != $base_cfg} {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+# Wait for cluster configuration to propagate and be consistent across nodes.
+proc wait_for_cluster_propagation {} {
+ wait_for_condition 50 100 {
+ [cluster_config_consistent] eq 1
+ } else {
+ fail "cluster config did not reach a consistent state"
+ }
+}
+
+# Check if cluster's view of hostnames is consistent
+proc are_hostnames_propagated {match_string} {
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ set cfg [R $j cluster slots]
+ foreach node $cfg {
+ for {set i 2} {$i < [llength $node]} {incr i} {
+ if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
+ return 0
+ }
+ }
+ }
+ }
+ return 1
+}
diff --git a/tests/cluster/run.tcl b/tests/cluster/run.tcl
new file mode 100644
index 0000000..86c5f58
--- /dev/null
+++ b/tests/cluster/run.tcl
@@ -0,0 +1,32 @@
+# Cluster test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
+# This software is released under the BSD License. See the COPYING file for
+# more information.
+
+cd tests/cluster
+source cluster.tcl
+source ../instances.tcl
+source ../../support/cluster.tcl ; # Redis Cluster client.
+
+set ::instances_count 20 ; # How many instances we use at max.
+set ::tlsdir "../../tls"
+
+proc main {} {
+ parse_options
+ spawn_instance redis $::redis_base_port $::instances_count {
+ "cluster-enabled yes"
+ "appendonly yes"
+ "enable-protected-configs yes"
+ "enable-debug-command yes"
+ "save ''"
+ }
+ run_tests
+ cleanup
+ end_tests
+}
+
+if {[catch main e]} {
+ puts $::errorInfo
+ if {$::pause_on_error} pause_on_error
+ cleanup
+ exit 1
+}
diff --git a/tests/cluster/tests/00-base.tcl b/tests/cluster/tests/00-base.tcl
new file mode 100644
index 0000000..693dded
--- /dev/null
+++ b/tests/cluster/tests/00-base.tcl
@@ -0,0 +1,89 @@
+# Check the basic monitoring and failover capabilities.
+
+source "../tests/includes/init-tests.tcl"
+
+if {$::simulate_error} {
+ test "This test will fail" {
+ fail "Simulated error"
+ }
+}
+
+test "Different nodes have different IDs" {
+ set ids {}
+ set numnodes 0
+ foreach_redis_id id {
+ incr numnodes
+ # Every node should just know itself.
+ set nodeid [dict get [get_myself $id] id]
+ assert {$nodeid ne {}}
+ lappend ids $nodeid
+ }
+ set numids [llength [lsort -unique $ids]]
+ assert {$numids == $numnodes}
+}
+
+test "It is possible to perform slot allocation" {
+ cluster_allocate_slots 5
+}
+
+test "After the join, every node gets a different config epoch" {
+ set trynum 60
+ while {[incr trynum -1] != 0} {
+ # We check that this condition is true for *all* the nodes.
+ set ok 1 ; # Will be set to 0 every time a node is not ok.
+ foreach_redis_id id {
+ set epochs {}
+ foreach n [get_cluster_nodes $id] {
+ lappend epochs [dict get $n config_epoch]
+ }
+ if {[lsort $epochs] != [lsort -unique $epochs]} {
+ set ok 0 ; # At least one collision!
+ }
+ }
+ if {$ok} break
+ after 1000
+ puts -nonewline .
+ flush stdout
+ }
+ if {$trynum == 0} {
+ fail "Config epoch conflict resolution is not working."
+ }
+}
+
+test "Nodes should report cluster_state is ok now" {
+ assert_cluster_state ok
+}
+
+test "Sanity for CLUSTER COUNTKEYSINSLOT" {
+ set reply [R 0 CLUSTER COUNTKEYSINSLOT 0]
+ assert {$reply eq 0}
+}
+
+test "It is possible to write and read from the cluster" {
+ cluster_write_test 0
+}
+
+test "CLUSTER RESET SOFT test" {
+ set last_epoch_node0 [get_info_field [R 0 cluster info] cluster_current_epoch]
+ R 0 FLUSHALL
+ R 0 CLUSTER RESET
+ assert {[get_info_field [R 0 cluster info] cluster_current_epoch] eq $last_epoch_node0}
+
+ set last_epoch_node1 [get_info_field [R 1 cluster info] cluster_current_epoch]
+ R 1 FLUSHALL
+ R 1 CLUSTER RESET SOFT
+ assert {[get_info_field [R 1 cluster info] cluster_current_epoch] eq $last_epoch_node1}
+}
+
+test "Coverage: CLUSTER HELP" {
+ assert_match "*CLUSTER <subcommand> *" [R 0 CLUSTER HELP]
+}
+
+test "Coverage: ASKING" {
+ assert_equal {OK} [R 0 ASKING]
+}
+
+test "CLUSTER SLAVES and CLUSTER REPLICAS with zero replicas" {
+ assert_equal {} [R 0 cluster slaves [R 0 CLUSTER MYID]]
+ assert_equal {} [R 0 cluster replicas [R 0 CLUSTER MYID]]
+}
diff --git a/tests/cluster/tests/01-faildet.tcl b/tests/cluster/tests/01-faildet.tcl
new file mode 100644
index 0000000..8fe87c9
--- /dev/null
+++ b/tests/cluster/tests/01-faildet.tcl
@@ -0,0 +1,38 @@
+# Check the basic monitoring and failover capabilities.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+test "Killing two slave nodes" {
+ kill_instance redis 5
+ kill_instance redis 6
+}
+
+test "Cluster should be still up" {
+ assert_cluster_state ok
+}
+
+test "Killing one master node" {
+ kill_instance redis 0
+}
+
+# Note: the only slave of instance 0 is already down, so no failover
+# is possible; a failover would change the state back to ok.
+test "Cluster should be down now" {
+ assert_cluster_state fail
+}
+
+test "Restarting master node" {
+ restart_instance redis 0
+}
+
+test "Cluster should be up again" {
+ assert_cluster_state ok
+}
diff --git a/tests/cluster/tests/02-failover.tcl b/tests/cluster/tests/02-failover.tcl
new file mode 100644
index 0000000..6b2fd09
--- /dev/null
+++ b/tests/cluster/tests/02-failover.tcl
@@ -0,0 +1,65 @@
+# Check the basic monitoring and failover capabilities.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+set current_epoch [CI 1 cluster_current_epoch]
+
+test "Killing one master node" {
+ kill_instance redis 0
+}
+
+test "Wait for failover" {
+ wait_for_condition 1000 50 {
+ [CI 1 cluster_current_epoch] > $current_epoch
+ } else {
+ fail "No failover detected"
+ }
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 1
+}
+
+test "Instance #5 is now a master" {
+ assert {[RI 5 role] eq {master}}
+}
+
+test "Restarting the previously killed master node" {
+ restart_instance redis 0
+}
+
+test "Instance #0 gets converted into a slave" {
+ wait_for_condition 1000 50 {
+ [RI 0 role] eq {slave}
+ } else {
+ fail "Old master was not converted into slave"
+ }
+}
diff --git a/tests/cluster/tests/03-failover-loop.tcl b/tests/cluster/tests/03-failover-loop.tcl
new file mode 100644
index 0000000..46c22a9
--- /dev/null
+++ b/tests/cluster/tests/03-failover-loop.tcl
@@ -0,0 +1,117 @@
+# Failover stress test.
+# In this test a different node is killed in a loop for N
+# iterations. The test checks that certain properties
+# are preserved across iterations.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set iterations 20
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+
+while {[incr iterations -1]} {
+ set tokill [randomInt 10]
+ set other [expr {($tokill+1)%10}] ; # Some other instance.
+ set key [randstring 20 20 alpha]
+ set val [randstring 20 20 alpha]
+ set role [RI $tokill role]
+ if {$role eq {master}} {
+ set slave {}
+ set myid [dict get [get_myself $tokill] id]
+ foreach_redis_id id {
+ if {$id == $tokill} continue
+ if {[dict get [get_myself $id] slaveof] eq $myid} {
+ set slave $id
+ }
+ }
+ if {$slave eq {}} {
+ fail "Unable to retrieve slave's ID for master #$tokill"
+ }
+ }
+
+ puts "--- Iteration $iterations ---"
+
+ if {$role eq {master}} {
+ test "Wait for slave of #$tokill to sync" {
+ wait_for_condition 1000 50 {
+ [string match {*state=online*} [RI $tokill slave0]]
+ } else {
+ fail "Slave of node #$tokill is not ok"
+ }
+ }
+ set slave_config_epoch [CI $slave cluster_my_epoch]
+ }
+
+ test "Cluster is writable before failover" {
+ for {set i 0} {$i < 100} {incr i} {
+ catch {$cluster set $key:$i $val:$i} err
+ assert {$err eq {OK}}
+ }
+ # Wait for the write to propagate to the slave if we
+ # are going to kill a master.
+ if {$role eq {master}} {
+ R $tokill wait 1 20000
+ }
+ }
+
+ test "Terminating node #$tokill" {
+ # Stop AOF so that an initial AOFRW won't prevent the instance from terminating
+ R $tokill config set appendonly no
+ kill_instance redis $tokill
+ }
+
+ if {$role eq {master}} {
+ test "Wait failover by #$slave with old epoch $slave_config_epoch" {
+ wait_for_condition 1000 50 {
+ [CI $slave cluster_my_epoch] > $slave_config_epoch
+ } else {
+ fail "No failover detected, epoch is still [CI $slave cluster_my_epoch]"
+ }
+ }
+ }
+
+ test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+ }
+
+ test "Cluster is writable again" {
+ for {set i 0} {$i < 100} {incr i} {
+ catch {$cluster set $key:$i:2 $val:$i:2} err
+ assert {$err eq {OK}}
+ }
+ }
+
+ test "Restarting node #$tokill" {
+ restart_instance redis $tokill
+ }
+
+ test "Instance #$tokill is now a slave" {
+ wait_for_condition 1000 50 {
+ [RI $tokill role] eq {slave}
+ } else {
+ fail "Restarted instance is not a slave"
+ }
+ }
+
+ test "We can read back the value we set before" {
+ for {set i 0} {$i < 100} {incr i} {
+ catch {$cluster get $key:$i} err
+ assert {$err eq "$val:$i"}
+ catch {$cluster get $key:$i:2} err
+ assert {$err eq "$val:$i:2"}
+ }
+ }
+}
+
+test "Post condition: current_epoch >= my_epoch everywhere" {
+ foreach_redis_id id {
+ assert {[CI $id cluster_current_epoch] >= [CI $id cluster_my_epoch]}
+ }
+}
diff --git a/tests/cluster/tests/04-resharding.tcl b/tests/cluster/tests/04-resharding.tcl
new file mode 100644
index 0000000..18a26bd
--- /dev/null
+++ b/tests/cluster/tests/04-resharding.tcl
@@ -0,0 +1,196 @@
+# Resharding stress test.
+# In this test random keys are written to the cluster while a
+# resharding is performed in the background. At the end the test
+# checks that the dataset is consistent with the expected content.
+
+source "../tests/includes/init-tests.tcl"
+source "../../../tests/support/cli.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Enable AOF in all the instances" {
+ foreach_redis_id id {
+ R $id config set appendonly yes
+ # We use "appendfsync no" because it's fast but also guarantees that
+ # write(2) is performed before replying to client.
+ R $id config set appendfsync no
+ }
+
+ foreach_redis_id id {
+ wait_for_condition 1000 500 {
+ [RI $id aof_rewrite_in_progress] == 0 &&
+ [RI $id aof_enabled] == 1
+ } else {
+ fail "Failed to enable AOF on instance #$id"
+ }
+ }
+}
+
+# Return non-zero if the specified PID corresponds to a process still
+# in execution, otherwise 0 is returned.
+proc process_is_running {pid} {
+    # ps(1) exits with an error if the PID does not exist, in which
+    # case catch returns non-zero. We want to return non-zero if the
+    # PID exists, so we invert the result with the expr not operator.
+ expr {![catch {exec ps -p $pid}]}
+}
+
+# Our resharding test performs the following actions:
+#
+# - N commands are sent to the cluster in the course of the test.
+# - Every command selects a random key from key:0 to key:MAX-1.
+# - The operation RPUSH key <randomvalue> is performed.
+# - Tcl remembers into an array all the values pushed to each list.
+# - After N/2 commands, the resharding process is started in background.
+# - The test continues while the resharding is in progress.
+# - At the end of the test, we wait for the resharding process to stop.
+# - Finally the keys are checked to see if they contain the value they should.
+
+set numkeys 50000
+set numops 200000
+set start_node_port [get_instance_attrib redis 0 port]
+set cluster [redis_cluster 127.0.0.1:$start_node_port]
+if {$::tls} {
+ # setup a non-TLS cluster client to the TLS cluster
+ set plaintext_port [get_instance_attrib redis 0 plaintext-port]
+ set cluster_plaintext [redis_cluster 127.0.0.1:$plaintext_port 0]
+ puts "Testing TLS cluster on start node 127.0.0.1:$start_node_port, plaintext port $plaintext_port"
+} else {
+ set cluster_plaintext $cluster
+ puts "Testing using non-TLS cluster"
+}
+catch {unset content}
+array set content {}
+set tribpid {}
+
+test "Cluster consistency during live resharding" {
+ set ele 0
+ for {set j 0} {$j < $numops} {incr j} {
+ # Trigger the resharding once we execute half the ops.
+ if {$tribpid ne {} &&
+ ($j % 10000) == 0 &&
+ ![process_is_running $tribpid]} {
+ set tribpid {}
+ }
+
+ if {$j >= $numops/2 && $tribpid eq {}} {
+ puts -nonewline "...Starting resharding..."
+ flush stdout
+ set target [dict get [get_myself [randomInt 5]] id]
+ set tribpid [lindex [exec \
+ ../../../src/redis-cli --cluster reshard \
+ 127.0.0.1:[get_instance_attrib redis 0 port] \
+ --cluster-from all \
+ --cluster-to $target \
+ --cluster-slots 100 \
+ --cluster-yes \
+ {*}[rediscli_tls_config "../../../tests"] \
+ | [info nameofexecutable] \
+ ../tests/helpers/onlydots.tcl \
+ &] 0]
+ }
+
+ # Write random data to random list.
+ set listid [randomInt $numkeys]
+ set key "key:$listid"
+ incr ele
+        # We write both with Lua scripts and with plain commands.
+        # This way we are also able to stress the Lua -> Redis command
+        # invocation path, which has checks that prevent Lua from writing
+        # into the wrong hash slot.
+ # We also use both TLS and plaintext connections.
+ if {$listid % 3 == 0} {
+ $cluster rpush $key $ele
+ } elseif {$listid % 3 == 1} {
+ $cluster_plaintext rpush $key $ele
+ } else {
+ $cluster eval {redis.call("rpush",KEYS[1],ARGV[1])} 1 $key $ele
+ }
+ lappend content($key) $ele
+
+ if {($j % 1000) == 0} {
+ puts -nonewline W; flush stdout
+ }
+ }
+
+ # Wait for the resharding process to end
+ wait_for_condition 1000 500 {
+ [process_is_running $tribpid] == 0
+ } else {
+ fail "Resharding is not terminating after some time."
+ }
+
+}
+
+test "Verify $numkeys keys for consistency with logical content" {
+ # Check that the Redis Cluster content matches our logical content.
+ foreach {key value} [array get content] {
+ if {[$cluster lrange $key 0 -1] ne $value} {
+ fail "Key $key expected to hold '$value' but actual content is [$cluster lrange $key 0 -1]"
+ }
+ }
+}
+
+test "Terminate and restart all the instances" {
+ foreach_redis_id id {
+ # Stop AOF so that an initial AOFRW won't prevent the instance from terminating
+ R $id config set appendonly no
+ kill_instance redis $id
+ restart_instance redis $id
+ }
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Verify $numkeys keys after the restart" {
+ # Check that the Redis Cluster content matches our logical content.
+ foreach {key value} [array get content] {
+ if {[$cluster lrange $key 0 -1] ne $value} {
+ fail "Key $key expected to hold '$value' but actual content is [$cluster lrange $key 0 -1]"
+ }
+ }
+}
+
+test "Disable AOF in all the instances" {
+ foreach_redis_id id {
+ R $id config set appendonly no
+ }
+}
+
+test "Verify slaves consistency" {
+ set verified_masters 0
+ foreach_redis_id id {
+ set role [R $id role]
+ lassign $role myrole myoffset slaves
+ if {$myrole eq {slave}} continue
+ set masterport [get_instance_attrib redis $id port]
+ set masterdigest [R $id debug digest]
+ foreach_redis_id sid {
+ set srole [R $sid role]
+ if {[lindex $srole 0] eq {master}} continue
+ if {[lindex $srole 2] != $masterport} continue
+ wait_for_condition 1000 500 {
+ [R $sid debug digest] eq $masterdigest
+ } else {
+ fail "Master and slave data digest are different"
+ }
+ incr verified_masters
+ }
+ }
+ assert {$verified_masters >= 5}
+}
+
+test "Dump sanitization was skipped for migrations" {
+ set verified_masters 0
+ foreach_redis_id id {
+ assert {[RI $id dump_payload_sanitizations] == 0}
+ }
+}
diff --git a/tests/cluster/tests/05-slave-selection.tcl b/tests/cluster/tests/05-slave-selection.tcl
new file mode 100644
index 0000000..bdb20a3
--- /dev/null
+++ b/tests/cluster/tests/05-slave-selection.tcl
@@ -0,0 +1,188 @@
+# Slave selection test
+# Check the algorithm trying to pick the slave with the most complete history.
+
+source "../tests/includes/init-tests.tcl"
+
+# Create a cluster with 5 masters and 10 slaves, so that we have 2
+# slaves for each master.
+test "Create a 5 nodes cluster" {
+ create_cluster 5 10
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "The first master has actually two slaves" {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R 0 role] 2]] == 2
+ && [llength [R 0 cluster replicas [R 0 CLUSTER MYID]]] == 2
+ } else {
+ fail "replicas didn't connect"
+ }
+}
+
+test "CLUSTER SLAVES and CLUSTER REPLICAS output is consistent" {
+ # Because we already have command output that cover CLUSTER REPLICAS elsewhere,
+ # here we simply judge whether their output is consistent to cover CLUSTER SLAVES.
+ set res [R 0 cluster slaves [R 0 CLUSTER MYID]]
+ set res2 [R 0 cluster replicas [R 0 CLUSTER MYID]]
+ assert_equal $res $res2
+}
+
+test {Slaves of #0 are instances #5 and #10 as expected} {
+ set port0 [get_instance_attrib redis 0 port]
+ assert {[lindex [R 5 role] 2] == $port0}
+ assert {[lindex [R 10 role] 2] == $port0}
+}
+
+test "Instance #5 and #10 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up} &&
+ [RI 10 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 or #10 master link status is not up"
+ }
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+
+test "Slaves are both able to receive and acknowledge writes" {
+ for {set j 0} {$j < 100} {incr j} {
+ $cluster set $j $j
+ }
+ assert {[R 0 wait 2 60000] == 2}
+}
+
+test "Write data while slave #10 is paused and can't receive it" {
+ # Stop the slave with a multi/exec transaction so that the master will
+ # be killed as soon as it can accept writes again.
+ R 10 multi
+ R 10 debug sleep 10
+ R 10 client kill 127.0.0.1:$port0
+ R 10 deferred 1
+ R 10 exec
+
+ # Write some data the slave can't receive.
+ for {set j 0} {$j < 100} {incr j} {
+ $cluster set $j $j
+ }
+
+ # Prevent the master from accepting new slaves.
+ # Use a large pause value since we'll kill it anyway.
+ R 0 CLIENT PAUSE 60000
+
+ # Wait for the slave to return available again
+ R 10 deferred 0
+ assert {[R 10 read] eq {OK OK}}
+
+ # Kill the master so that a reconnection will not be possible.
+ kill_instance redis 0
+}
+
+test "Wait for instance #5 (and not #10) to turn into a master" {
+ wait_for_condition 1000 50 {
+ [RI 5 role] eq {master}
+ } else {
+ fail "No failover detected"
+ }
+}
+
+test "Wait for the node #10 to return alive before ending the test" {
+ R 10 ping
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Node #10 should eventually replicate node #5" {
+ set port5 [get_instance_attrib redis 5 port]
+ wait_for_condition 1000 50 {
+ ([lindex [R 10 role] 2] == $port5) &&
+ ([lindex [R 10 role] 3] eq {connected})
+ } else {
+ fail "#10 didn't became slave of #5"
+ }
+}
+
+source "../tests/includes/init-tests.tcl"
+
+# Create a cluster with 3 masters and 15 slaves, so that we have 5
+# slaves for each master.
+test "Create a 3 nodes cluster" {
+ create_cluster 3 15
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "The first master has actually 5 slaves" {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R 0 role] 2]] == 5
+ } else {
+ fail "replicas didn't connect"
+ }
+}
+
+test {Slaves of #0 are instances #3, #6, #9, #12 and #15 as expected} {
+ set port0 [get_instance_attrib redis 0 port]
+ assert {[lindex [R 3 role] 2] == $port0}
+ assert {[lindex [R 6 role] 2] == $port0}
+ assert {[lindex [R 9 role] 2] == $port0}
+ assert {[lindex [R 12 role] 2] == $port0}
+ assert {[lindex [R 15 role] 2] == $port0}
+}
+
+test {Instance #3, #6, #9, #12 and #15 synced with the master} {
+ wait_for_condition 1000 50 {
+ [RI 3 master_link_status] eq {up} &&
+ [RI 6 master_link_status] eq {up} &&
+ [RI 9 master_link_status] eq {up} &&
+ [RI 12 master_link_status] eq {up} &&
+ [RI 15 master_link_status] eq {up}
+ } else {
+ fail "Instance #3 or #6 or #9 or #12 or #15 master link status is not up"
+ }
+}
+
+proc master_detected {instances} {
+ foreach instance [dict keys $instances] {
+ if {[RI $instance role] eq {master}} {
+ return true
+ }
+ }
+
+ return false
+}
+
+test "New Master down consecutively" {
+ set instances [dict create 0 1 3 1 6 1 9 1 12 1 15 1]
+
+ set loops [expr {[dict size $instances]-1}]
+ for {set i 0} {$i < $loops} {incr i} {
+ set master_id -1
+ foreach instance [dict keys $instances] {
+ if {[RI $instance role] eq {master}} {
+ set master_id $instance
+ break;
+ }
+ }
+
+ if {$master_id eq -1} {
+ fail "no master detected, #loop $i"
+ }
+
+ set instances [dict remove $instances $master_id]
+
+ kill_instance redis $master_id
+ wait_for_condition 1000 50 {
+ [master_detected $instances]
+ } else {
+ fail "No failover detected when master $master_id fails"
+ }
+
+ assert_cluster_state ok
+ }
+}
diff --git a/tests/cluster/tests/06-slave-stop-cond.tcl b/tests/cluster/tests/06-slave-stop-cond.tcl
new file mode 100644
index 0000000..80a2d17
--- /dev/null
+++ b/tests/cluster/tests/06-slave-stop-cond.tcl
@@ -0,0 +1,77 @@
+# Slave stop condition test
+# Check that if the slave stays disconnected from its master for longer
+# than the allowed limit, it will not try to failover its master.
+
+source "../tests/includes/init-tests.tcl"
+
+# Create a cluster with 5 masters and 5 slaves.
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "The first master has actually one slave" {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R 0 role] 2]] == 1
+ } else {
+ fail "replicas didn't connect"
+ }
+}
+
+test {Slave of #0 is instance #5 as expected} {
+ set port0 [get_instance_attrib redis 0 port]
+ assert {[lindex [R 5 role] 2] == $port0}
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+test "Lower the slave validity factor of #5 to the value of 2" {
+ assert {[R 5 config set cluster-slave-validity-factor 2] eq {OK}}
+}
+
+test "Break master-slave link and prevent further reconnections" {
+ # Stop the slave with a multi/exec transaction so that the master will
+ # be killed as soon as it can accept writes again.
+ R 5 multi
+ R 5 client kill 127.0.0.1:$port0
+    # Here we should sleep 6 or more seconds (node_timeout * slave_validity),
+    # but the validity time is also incremented by the repl-ping-slave-period
+    # value, which is 10 seconds by default. So we need to wait more than
+    # 16 seconds.
+ R 5 debug sleep 20
+ R 5 deferred 1
+ R 5 exec
+
+ # Prevent the master from accepting new slaves.
+ # Use a large pause value since we'll kill it anyway.
+ R 0 CLIENT PAUSE 60000
+
+ # Wait for the slave to return available again
+ R 5 deferred 0
+ assert {[R 5 read] eq {OK OK}}
+
+ # Kill the master so that a reconnection will not be possible.
+ kill_instance redis 0
+}
+
+test "Slave #5 is reachable and alive" {
+ assert {[R 5 ping] eq {PONG}}
+}
+
+test "Slave #5 should not be able to failover" {
+ after 10000
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Cluster should be down" {
+ assert_cluster_state fail
+}
diff --git a/tests/cluster/tests/07-replica-migration.tcl b/tests/cluster/tests/07-replica-migration.tcl
new file mode 100644
index 0000000..c4e9985
--- /dev/null
+++ b/tests/cluster/tests/07-replica-migration.tcl
@@ -0,0 +1,103 @@
+# Replica migration test.
+# Check that orphaned masters are joined by replicas of masters having
+# multiple replicas attached, according to the migration barrier settings.
+
+source "../tests/includes/init-tests.tcl"
+
+# Create a cluster with 5 masters and 10 slaves, so that we have 2
+# slaves for each master.
+test "Create a 5 nodes cluster" {
+ create_cluster 5 10
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Each master should have two replicas attached" {
+ foreach_redis_id id {
+ if {$id < 5} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] == 2
+ } else {
+ fail "Master #$id does not have 2 slaves as expected"
+ }
+ }
+ }
+}
+
+test "Killing all the slaves of master #0 and #1" {
+ kill_instance redis 5
+ kill_instance redis 10
+ kill_instance redis 6
+ kill_instance redis 11
+ after 4000
+}
+
+foreach_redis_id id {
+ if {$id < 5} {
+ test "Master #$id should have at least one replica" {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] >= 1
+ } else {
+ fail "Master #$id has no replicas"
+ }
+ }
+ }
+}
+
+# Now test the migration to a master which used to be a slave, after
+# a failover.
+
+source "../tests/includes/init-tests.tcl"
+
+# Create a cluster with 5 masters and 10 slaves, so that we have 2
+# slaves for each master.
+test "Create a 5 nodes cluster" {
+ create_cluster 5 10
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Kill slave #7 of master #2. Only slave left is #12 now" {
+ kill_instance redis 7
+}
+
+set current_epoch [CI 1 cluster_current_epoch]
+
+test "Killing master node #2, #12 should failover" {
+ kill_instance redis 2
+}
+
+test "Wait for failover" {
+ wait_for_condition 1000 50 {
+ [CI 1 cluster_current_epoch] > $current_epoch
+ } else {
+ fail "No failover detected"
+ }
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 1
+}
+
+test "Instance 12 is now a master without slaves" {
+ assert {[RI 12 role] eq {master}}
+}
+
+# The remaining instance is now without slaves. Some other slave
+# should migrate to it.
+
+test "Master #12 should get at least one migrated replica" {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R 12 role] 2]] >= 1
+ } else {
+ fail "Master #12 has no replicas"
+ }
+}
diff --git a/tests/cluster/tests/08-update-msg.tcl b/tests/cluster/tests/08-update-msg.tcl
new file mode 100644
index 0000000..9011f32
--- /dev/null
+++ b/tests/cluster/tests/08-update-msg.tcl
@@ -0,0 +1,90 @@
+# Test the UPDATE messages sent by other nodes when the currently authoritative
+# master is unavailable. The test is performed in the following steps:
+#
+# 1) Master goes down.
+# 2) Slave fails over and becomes the new master.
+# 3) New master is partitioned away.
+# 4) Old master returns.
+# 5) At this point we expect the old master to turn into a slave ASAP because
+# of the UPDATE messages it will receive from the other nodes when its
+# configuration is found to be outdated.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+set current_epoch [CI 1 cluster_current_epoch]
+
+test "Killing one master node" {
+ kill_instance redis 0
+}
+
+test "Wait for failover" {
+ wait_for_condition 1000 50 {
+ [CI 1 cluster_current_epoch] > $current_epoch
+ } else {
+ fail "No failover detected"
+ }
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 1
+}
+
+test "Instance #5 is now a master" {
+ assert {[RI 5 role] eq {master}}
+}
+
+test "Killing the new master #5" {
+ kill_instance redis 5
+}
+
+test "Cluster should be down now" {
+ assert_cluster_state fail
+}
+
+test "Restarting the old master node" {
+ restart_instance redis 0
+}
+
+test "Instance #0 gets converted into a slave" {
+ wait_for_condition 1000 50 {
+ [RI 0 role] eq {slave}
+ } else {
+ fail "Old master was not converted into slave"
+ }
+}
+
+test "Restarting the new master node" {
+ restart_instance redis 5
+}
+
+test "Cluster is up again" {
+ assert_cluster_state ok
+}
diff --git a/tests/cluster/tests/09-pubsub.tcl b/tests/cluster/tests/09-pubsub.tcl
new file mode 100644
index 0000000..e62b91c
--- /dev/null
+++ b/tests/cluster/tests/09-pubsub.tcl
@@ -0,0 +1,40 @@
+# Test PUBLISH propagation across the cluster.
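+#
+# Context (sketch): in Redis Cluster, PUBLISH is broadcast over the cluster
+# bus, so a message published on any node, master or replica, is delivered to
+# matching subscribers on every node. The helper below verifies exactly that.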
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+proc test_cluster_publish {instance instances} {
+ # Subscribe on all the instances except the one we use to send.
+ for {set j 0} {$j < $instances} {incr j} {
+ if {$j != $instance} {
+ R $j deferred 1
+ R $j subscribe testchannel
+ R $j read; # Read the subscribe reply
+ }
+ }
+
+ set data [randomValue]
+ R $instance PUBLISH testchannel $data
+
+ # Read the message back from all the nodes.
+ for {set j 0} {$j < $instances} {incr j} {
+ if {$j != $instance} {
+ set msg [R $j read]
+ assert {$data eq [lindex $msg 2]}
+ R $j unsubscribe testchannel
+ R $j read; # Read the unsubscribe reply
+ R $j deferred 0
+ }
+ }
+}
+
+test "Test publishing to master" {
+ test_cluster_publish 0 10
+}
+
+test "Test publishing to slave" {
+ test_cluster_publish 5 10
+}
diff --git a/tests/cluster/tests/10-manual-failover.tcl b/tests/cluster/tests/10-manual-failover.tcl
new file mode 100644
index 0000000..5441b79
--- /dev/null
+++ b/tests/cluster/tests/10-manual-failover.tcl
@@ -0,0 +1,192 @@
+# Check the manual failover
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+set current_epoch [CI 1 cluster_current_epoch]
+
+set numkeys 50000
+set numops 10000
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+catch {unset content}
+array set content {}
+
+test "Send CLUSTER FAILOVER to #5, during load" {
+ for {set j 0} {$j < $numops} {incr j} {
+ # Write random data to random list.
+ set listid [randomInt $numkeys]
+ set key "key:$listid"
+ set ele [randomValue]
+ # We write both with Lua scripts and with plain commands. This way we
+ # also stress the Lua -> Redis command invocation path, which has checks
+ # to prevent Lua from writing into the wrong hash slots.
+ if {$listid % 2} {
+ $cluster rpush $key $ele
+ } else {
+ $cluster eval {redis.call("rpush",KEYS[1],ARGV[1])} 1 $key $ele
+ }
+ lappend content($key) $ele
+
+ if {($j % 1000) == 0} {
+ puts -nonewline W; flush stdout
+ }
+
+ if {$j == $numops/2} {R 5 cluster failover}
+ }
+}
+
+test "Wait for failover" {
+ wait_for_condition 1000 50 {
+ [CI 1 cluster_current_epoch] > $current_epoch
+ } else {
+ fail "No failover detected"
+ }
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 1
+}
+
+test "Instance #5 is now a master" {
+ assert {[RI 5 role] eq {master}}
+}
+
+test "Verify $numkeys keys for consistency with logical content" {
+ # Check that the Redis Cluster content matches our logical content.
+ foreach {key value} [array get content] {
+ assert {[$cluster lrange $key 0 -1] eq $value}
+ }
+}
+
+test "Instance #0 gets converted into a slave" {
+ wait_for_condition 1000 50 {
+ [RI 0 role] eq {slave}
+ } else {
+ fail "Old master was not converted into slave"
+ }
+}
+
+## Check that manual failover does not happen if we can't talk with the master.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+test "Make instance #0 unreachable without killing it" {
+ R 0 deferred 1
+ R 0 DEBUG SLEEP 10
+}
+
+test "Send CLUSTER FAILOVER to instance #5" {
+ R 5 cluster failover
+}
+
+test "Instance #5 is still a slave after some time (no failover)" {
+ after 5000
+ assert {[RI 5 role] eq {master}}
+}
+
+test "Wait for instance #0 to return back alive" {
+ R 0 deferred 0
+ assert {[R 0 read] eq {OK}}
+}
+
+## Check with "force" failover happens anyway.
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+test "Make instance #0 unreachable without killing it" {
+ R 0 deferred 1
+ R 0 DEBUG SLEEP 10
+}
+
+test "Send CLUSTER FAILOVER to instance #5" {
+ R 5 cluster failover force
+}
+
+test "Instance #5 is a master after some time" {
+ wait_for_condition 1000 50 {
+ [RI 5 role] eq {master}
+ } else {
+ fail "Instance #5 is not a master after some time regardless of FORCE"
+ }
+}
+
+test "Wait for instance #0 to return back alive" {
+ R 0 deferred 0
+ assert {[R 0 read] eq {OK}}
+}
diff --git a/tests/cluster/tests/11-manual-takeover.tcl b/tests/cluster/tests/11-manual-takeover.tcl
new file mode 100644
index 0000000..78a0f85
--- /dev/null
+++ b/tests/cluster/tests/11-manual-takeover.tcl
@@ -0,0 +1,71 @@
+# Manual takeover test
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+# For this test, disable replica failover until
+# all of the primaries are confirmed killed. Otherwise
+# there might be enough time to elect a replica.
+set replica_ids { 5 6 7 }
+foreach id $replica_ids {
+ R $id config set cluster-replica-no-failover yes
+}
+
+test "Killing majority of master nodes" {
+ kill_instance redis 0
+ kill_instance redis 1
+ kill_instance redis 2
+}
+
+foreach id $replica_ids {
+ R $id config set cluster-replica-no-failover no
+}
+
+test "Cluster should eventually be down" {
+ assert_cluster_state fail
+}
+
+test "Use takeover to bring slaves back" {
+ foreach id $replica_ids {
+ R $id cluster failover takeover
+ }
+}
+
+test "Cluster should eventually be up again" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 4
+}
+
+test "Instance #5, #6, #7 are now masters" {
+ foreach id $replica_ids {
+ assert {[RI $id role] eq {master}}
+ }
+}
+
+test "Restarting the previously killed master nodes" {
+ restart_instance redis 0
+ restart_instance redis 1
+ restart_instance redis 2
+}
+
+test "Instance #0, #1, #2 gets converted into a slaves" {
+ wait_for_condition 1000 50 {
+ [RI 0 role] eq {slave} && [RI 1 role] eq {slave} && [RI 2 role] eq {slave}
+ } else {
+ fail "Old masters not converted into slaves"
+ }
+}
diff --git a/tests/cluster/tests/12-replica-migration-2.tcl b/tests/cluster/tests/12-replica-migration-2.tcl
new file mode 100644
index 0000000..ed68006
--- /dev/null
+++ b/tests/cluster/tests/12-replica-migration-2.tcl
@@ -0,0 +1,75 @@
+# Replica migration test #2.
+#
+# Check that a master becomes a valid target for replica migration again
+# after it gets slots assigned back, in a cluster where the other masters
+# have slaves.
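+#
+# A sketch of the knob involved: with
+#   R $id config set cluster-allow-replica-migration yes
+# replicas may re-home themselves to masters left without replicas; test 12.1
+# covers the 'no' case, where they stay put even if their master loses all
+# of its slots.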
+
+source "../tests/includes/init-tests.tcl"
+source "../../../tests/support/cli.tcl"
+
+# Create a cluster with 5 masters and 15 slaves, to make sure there are no
+# empty masters and to make rebalancing simpler to handle during the test.
+test "Create a 5 nodes cluster" {
+ cluster_create_with_continuous_slots 5 15
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Each master should have at least two replicas attached" {
+ foreach_redis_id id {
+ if {$id < 5} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] >= 2
+ } else {
+ fail "Master #$id does not have 2 slaves as expected"
+ }
+ }
+ }
+}
+
+test "Set allow-replica-migration yes" {
+ foreach_redis_id id {
+ R $id CONFIG SET cluster-allow-replica-migration yes
+ }
+}
+
+set master0_id [dict get [get_myself 0] id]
+test "Resharding all the master #0 slots away from it" {
+ set output [exec \
+ ../../../src/redis-cli --cluster rebalance \
+ 127.0.0.1:[get_instance_attrib redis 0 port] \
+ {*}[rediscli_tls_config "../../../tests"] \
+ --cluster-weight ${master0_id}=0 >@ stdout ]
+
+}
+
+test "Master #0 who lost all slots should turn into a replica without replicas" {
+ wait_for_condition 1000 50 {
+ [RI 0 role] == "slave" && [RI 0 connected_slaves] == 0
+ } else {
+ puts [R 0 info replication]
+ fail "Master #0 didn't turn itself into a replica"
+ }
+}
+
+test "Resharding back some slot to master #0" {
+ # Wait for the cluster config to propagate before attempting a
+ # new resharding.
+ after 10000
+ set output [exec \
+ ../../../src/redis-cli --cluster rebalance \
+ 127.0.0.1:[get_instance_attrib redis 0 port] \
+ {*}[rediscli_tls_config "../../../tests"] \
+ --cluster-weight ${master0_id}=.01 \
+ --cluster-use-empty-masters >@ stdout]
+}
+
+test "Master #0 should re-acquire one or more replicas" {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R 0 role] 2]] >= 1
+ } else {
+ fail "Master #0 has no has replicas"
+ }
+}
diff --git a/tests/cluster/tests/12.1-replica-migration-3.tcl b/tests/cluster/tests/12.1-replica-migration-3.tcl
new file mode 100644
index 0000000..790c732
--- /dev/null
+++ b/tests/cluster/tests/12.1-replica-migration-3.tcl
@@ -0,0 +1,65 @@
+# Replica migration test #3.
+#
+# Check that if 'cluster-allow-replica-migration' is set to 'no', slaves do not
+# migrate when master becomes empty.
+
+source "../tests/includes/init-tests.tcl"
+source "../tests/includes/utils.tcl"
+
+# Create a cluster with 5 masters and 15 slaves, to make sure there are no
+# empty masters and to make rebalancing simpler to handle during the test.
+test "Create a 5 nodes cluster" {
+ cluster_create_with_continuous_slots 5 15
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Each master should have at least two replicas attached" {
+ foreach_redis_id id {
+ if {$id < 5} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] >= 2
+ } else {
+ fail "Master #$id does not have 2 slaves as expected"
+ }
+ }
+ }
+}
+
+test "Set allow-replica-migration no" {
+ foreach_redis_id id {
+ R $id CONFIG SET cluster-allow-replica-migration no
+ }
+}
+
+set master0_id [dict get [get_myself 0] id]
+test "Resharding all the master #0 slots away from it" {
+ set output [exec \
+ ../../../src/redis-cli --cluster rebalance \
+ 127.0.0.1:[get_instance_attrib redis 0 port] \
+ {*}[rediscli_tls_config "../../../tests"] \
+ --cluster-weight ${master0_id}=0 >@ stdout ]
+}
+
+test "Wait cluster to be stable" {
+ wait_cluster_stable
+}
+
+test "Master #0 still should have its replicas" {
+ assert { [llength [lindex [R 0 role] 2]] >= 2 }
+}
+
+test "Each master should have at least two replicas attached" {
+ foreach_redis_id id {
+ if {$id < 5} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] >= 2
+ } else {
+ fail "Master #$id does not have 2 slaves as expected"
+ }
+ }
+ }
+}
+
diff --git a/tests/cluster/tests/13-no-failover-option.tcl b/tests/cluster/tests/13-no-failover-option.tcl
new file mode 100644
index 0000000..befa598
--- /dev/null
+++ b/tests/cluster/tests/13-no-failover-option.tcl
@@ -0,0 +1,61 @@
+# Check that the no-failover option works
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+
+ # Configure it to never failover the master
+ R 5 CONFIG SET cluster-slave-no-failover yes
+}
+
+test "Instance #5 synced with the master" {
+ wait_for_condition 1000 50 {
+ [RI 5 master_link_status] eq {up}
+ } else {
+ fail "Instance #5 master link status is not up"
+ }
+}
+
+test "The nofailover flag is propagated" {
+ set slave5_id [dict get [get_myself 5] id]
+
+ foreach_redis_id id {
+ wait_for_condition 1000 50 {
+ [has_flag [get_node_by_id $id $slave5_id] nofailover]
+ } else {
+ fail "Instance $id can't see the nofailover flag of slave"
+ }
+ }
+}
+
+set current_epoch [CI 1 cluster_current_epoch]
+
+test "Killing one master node" {
+ kill_instance redis 0
+}
+
+test "Cluster should be still down after some time" {
+ after 10000
+ assert_cluster_state fail
+}
+
+test "Instance #5 is still a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "Restarting the previously killed master node" {
+ restart_instance redis 0
+}
diff --git a/tests/cluster/tests/14-consistency-check.tcl b/tests/cluster/tests/14-consistency-check.tcl
new file mode 100644
index 0000000..e3b9a19
--- /dev/null
+++ b/tests/cluster/tests/14-consistency-check.tcl
@@ -0,0 +1,124 @@
+source "../tests/includes/init-tests.tcl"
+source "../../../tests/support/cli.tcl"
+
+test "Create a 5 nodes cluster" {
+ create_cluster 5 5
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+proc find_non_empty_master {} {
+ set master_id_no {}
+ foreach_redis_id id {
+ if {[RI $id role] eq {master} && [R $id dbsize] > 0} {
+ set master_id_no $id
+ break
+ }
+ }
+ return $master_id_no
+}
+
+proc get_one_of_my_replica {id} {
+ wait_for_condition 1000 50 {
+ [llength [lindex [R $id role] 2]] > 0
+ } else {
+ fail "replicas didn't connect"
+ }
+ set replica_port [lindex [lindex [lindex [R $id role] 2] 0] 1]
+ set replica_id_num [get_instance_id_by_port redis $replica_port]
+ return $replica_id_num
+}
+
+proc cluster_write_keys_with_expire {id ttl} {
+ set prefix [randstring 20 20 alpha]
+ set port [get_instance_attrib redis $id port]
+ set cluster [redis_cluster 127.0.0.1:$port]
+ for {set j 100} {$j < 200} {incr j} {
+ $cluster setex key_expire.$j $ttl $prefix.$j
+ }
+ $cluster close
+}
+
+# make sure that a replica that restarts from persistence will load keys
+# that have already expired; this is critical for the correct execution of
+# commands that arrive from the master
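+#
+# For example (a sketch, not part of the test): if the master runs
+#   SETEX foo 3 bar
+# and the replica restarts from its RDB/AOF after those 3 seconds have
+# elapsed, it must still load 'foo' and keep it until the master's DEL
+# arrives, since key expiry on replicas is driven by the master.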
+proc test_slave_load_expired_keys {aof} {
+ test "Slave expired keys is loaded when restarted: appendonly=$aof" {
+ set master_id [find_non_empty_master]
+ set replica_id [get_one_of_my_replica $master_id]
+
+ set master_dbsize_0 [R $master_id dbsize]
+ set replica_dbsize_0 [R $replica_id dbsize]
+ assert_equal $master_dbsize_0 $replica_dbsize_0
+
+ # config the replica persistence and rewrite the config file to survive
+ # restart. Note that this needs to be done before populating the volatile
+ # keys, since that triggers an AOFRW, and we'd rather have the AOF file
+ # contain 'SET PXAT' commands than an RDB with volatile keys.
+ R $replica_id config set appendonly $aof
+ R $replica_id config rewrite
+
+ # fill with 100 keys with 3 second TTL
+ set data_ttl 3
+ cluster_write_keys_with_expire $master_id $data_ttl
+
+ # wait for replica to be in sync with master
+ wait_for_condition 500 10 {
+ [R $replica_id dbsize] eq [R $master_id dbsize]
+ } else {
+ fail "replica didn't sync"
+ }
+
+ set replica_dbsize_1 [R $replica_id dbsize]
+ assert {$replica_dbsize_1 > $replica_dbsize_0}
+
+ # make replica create persistence file
+ if {$aof == "yes"} {
+ # we need to wait for the initial AOFRW to be done, otherwise
+ # kill_instance (which now uses SIGTERM) will fail ("Writing initial AOF, can't exit")
+ wait_for_condition 100 10 {
+ [RI $replica_id aof_rewrite_scheduled] eq 0 &&
+ [RI $replica_id aof_rewrite_in_progress] eq 0
+ } else {
+ fail "AOFRW didn't finish"
+ }
+ } else {
+ R $replica_id save
+ }
+
+ # kill the replica (would stay down until re-started)
+ kill_instance redis $replica_id
+
+ # Make sure the master doesn't do active expire (sending DELs to the replica)
+ R $master_id DEBUG SET-ACTIVE-EXPIRE 0
+
+ # wait for all the keys to get logically expired
+ after [expr $data_ttl*1000]
+
+ # start the replica again (loading an RDB or AOF file)
+ restart_instance redis $replica_id
+
+ # make sure the keys are still there
+ set replica_dbsize_3 [R $replica_id dbsize]
+ assert {$replica_dbsize_3 > $replica_dbsize_0}
+
+ # restore settings
+ R $master_id DEBUG SET-ACTIVE-EXPIRE 1
+
+ # wait for the master to expire all keys and replica to get the DELs
+ wait_for_condition 500 10 {
+ [R $replica_id dbsize] eq $master_dbsize_0
+ } else {
+ fail "keys didn't expire"
+ }
+ }
+}
+
+test_slave_load_expired_keys no
+test_slave_load_expired_keys yes
diff --git a/tests/cluster/tests/15-cluster-slots.tcl b/tests/cluster/tests/15-cluster-slots.tcl
new file mode 100644
index 0000000..892e904
--- /dev/null
+++ b/tests/cluster/tests/15-cluster-slots.tcl
@@ -0,0 +1,128 @@
+source "../tests/includes/init-tests.tcl"
+
+proc cluster_allocate_mixedSlots {n} {
+ set slot 16383
+ while {$slot >= 0} {
+ set node [expr {$slot % $n}]
+ lappend slots_$node $slot
+ incr slot -1
+ }
+ for {set j 0} {$j < $n} {incr j} {
+ R $j cluster addslots {*}[set slots_${j}]
+ }
+}
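+
+# With n masters this assigns node j every slot where slot % n == j, i.e. an
+# interleaved ("mixed") layout rather than the usual contiguous ranges.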
+
+proc create_cluster_with_mixedSlot {masters slaves} {
+ cluster_allocate_mixedSlots $masters
+ if {$slaves} {
+ cluster_allocate_slaves $masters $slaves
+ }
+ assert_cluster_state ok
+}
+
+test "Create a 5 nodes cluster" {
+ create_cluster_with_mixedSlot 5 15
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Instance #5 is a slave" {
+ assert {[RI 5 role] eq {slave}}
+}
+
+test "client do not break when cluster slot" {
+ R 0 config set client-output-buffer-limit "normal 33554432 16777216 60"
+ if { [catch {R 0 cluster slots}] } {
+ fail "output overflow when cluster slots"
+ }
+}
+
+test "client can handle keys with hash tag" {
+ set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+ $cluster set foo{tag} bar
+ $cluster close
+}
+
+test "slot migration is valid from primary to another primary" {
+ set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+ set key order1
+ set slot [$cluster cluster keyslot $key]
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)]
+}
+
+test "slot migration is invalid from primary to replica" {
+ set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+ set key order1
+ set slot [$cluster cluster keyslot $key]
+ array set nodefrom [$cluster masternode_for_slot $slot]
+
+ # Get replica node serving slot.
+ set replicanodeinfo [$cluster cluster replicas $nodefrom(id)]
+ puts $replicanodeinfo
+ set args [split $replicanodeinfo " "]
+ set replicaid [lindex [split [lindex $args 0] \{] 1]
+ puts $replicaid
+
+ catch {[$nodefrom(link) cluster setslot $slot node $replicaid]} err
+ assert_match "*Target node is not a master" $err
+}
+
+proc count_bound_slots {n} {
+ set slot_count 0
+ foreach slot_range_mapping [$n cluster slots] {
+ set start_slot [lindex $slot_range_mapping 0]
+ set end_slot [lindex $slot_range_mapping 1]
+ incr slot_count [expr $end_slot - $start_slot + 1]
+ }
+ return $slot_count
+ }
+
+ test "slot must be unbound on the owner when it is deleted" {
+ set node0 [Rn 0]
+ set node1 [Rn 1]
+ assert {[count_bound_slots $node0] eq 16384}
+ assert {[count_bound_slots $node1] eq 16384}
+
+ set slot_to_delete 0
+ # Delete
+ $node0 CLUSTER DELSLOTS $slot_to_delete
+
+ # Verify
+ # The node that owns the slot must unbind the slot that was deleted
+ wait_for_condition 1000 50 {
+ [count_bound_slots $node0] == 16383
+ } else {
+ fail "Cluster slot deletion was not recorded on the node that owns the slot"
+ }
+
+ # We don't propagate slot deletion across all the nodes in the cluster.
+ # This can lead to an extra redirect before the clients find out that the
+ # slot is unbound.
+ wait_for_condition 1000 50 {
+ [count_bound_slots $node1] == 16384
+ } else {
+ fail "Cluster slot deletion should not be propagated to all nodes in the cluster"
+ }
+ }
+
+if {$::tls} {
+ test {CLUSTER SLOTS from non-TLS client in TLS cluster} {
+ set slots_tls [R 0 cluster slots]
+ set host [get_instance_attrib redis 0 host]
+ set plaintext_port [get_instance_attrib redis 0 plaintext-port]
+ set client_plain [redis $host $plaintext_port 0 0]
+ set slots_plain [$client_plain cluster slots]
+ $client_plain close
+ # Compare the ports in the first row
+ assert_no_match [lindex $slots_tls 0 3 1] [lindex $slots_plain 0 3 1]
+ }
+} \ No newline at end of file
diff --git a/tests/cluster/tests/16-transactions-on-replica.tcl b/tests/cluster/tests/16-transactions-on-replica.tcl
new file mode 100644
index 0000000..8bec06e
--- /dev/null
+++ b/tests/cluster/tests/16-transactions-on-replica.tcl
@@ -0,0 +1,85 @@
+# Check basic transactions on a replica.
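+#
+# The rules under test (sketch): a cluster replica serves reads only on
+# connections that issued READONLY, READWRITE reverts that, and writes are
+# always redirected with -MOVED to the master owning the hash slot.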
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a primary with a replica" {
+ create_cluster 1 1
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+set primary [Rn 0]
+set replica [Rn 1]
+
+test "Can't read from replica without READONLY" {
+ $primary SET a 1
+ wait_for_ofs_sync $primary $replica
+ catch {$replica GET a} err
+ assert {[string range $err 0 4] eq {MOVED}}
+}
+
+test "Can't read from replica after READWRITE" {
+ $replica READWRITE
+ catch {$replica GET a} err
+ assert {[string range $err 0 4] eq {MOVED}}
+}
+
+test "Can read from replica after READONLY" {
+ $replica READONLY
+ assert {[$replica GET a] eq {1}}
+}
+
+test "Can perform HSET primary and HGET from replica" {
+ $primary HSET h a 1
+ $primary HSET h b 2
+ $primary HSET h c 3
+ wait_for_ofs_sync $primary $replica
+ assert {[$replica HGET h a] eq {1}}
+ assert {[$replica HGET h b] eq {2}}
+ assert {[$replica HGET h c] eq {3}}
+}
+
+test "Can MULTI-EXEC transaction of HGET operations from replica" {
+ $replica MULTI
+ assert {[$replica HGET h a] eq {QUEUED}}
+ assert {[$replica HGET h b] eq {QUEUED}}
+ assert {[$replica HGET h c] eq {QUEUED}}
+ assert {[$replica EXEC] eq {1 2 3}}
+}
+
+test "MULTI-EXEC with write operations is MOVED" {
+ $replica MULTI
+ catch {$replica HSET h b 4} err
+ assert {[string range $err 0 4] eq {MOVED}}
+ catch {$replica exec} err
+ assert {[string range $err 0 8] eq {EXECABORT}}
+}
+
+test "read-only blocking operations from replica" {
+ set rd [redis_deferring_client redis 1]
+ $rd readonly
+ $rd read
+ $rd XREAD BLOCK 0 STREAMS k 0
+
+ wait_for_condition 1000 50 {
+ [RI 1 blocked_clients] eq {1}
+ } else {
+ fail "client wasn't blocked"
+ }
+
+ $primary XADD k * foo bar
+ set res [$rd read]
+ set res [lindex [lindex [lindex [lindex $res 0] 1] 0] 1]
+ assert {$res eq {foo bar}}
+ $rd close
+}
+
+test "reply MOVED when eval from replica for update" {
+ catch {[$replica eval {#!lua
+ return redis.call('del','a')
+ } 1 a
+ ]} err
+ assert {[string range $err 0 4] eq {MOVED}}
+} \ No newline at end of file
diff --git a/tests/cluster/tests/17-diskless-load-swapdb.tcl b/tests/cluster/tests/17-diskless-load-swapdb.tcl
new file mode 100644
index 0000000..7a56ec7
--- /dev/null
+++ b/tests/cluster/tests/17-diskless-load-swapdb.tcl
@@ -0,0 +1,86 @@
+# Check that the replica's keys and its keys-to-slots map are right after a failed diskless load using SWAPDB.
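+#
+# Context (sketch): repl-diskless-load controls how a replica applies a
+# diskless full sync. With 'swapdb' the old dataset is kept in memory so it
+# can be restored if the transfer fails, which is the property verified here;
+# the other accepted values are 'disabled' and 'on-empty-db'.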
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a primary with a replica" {
+ create_cluster 1 1
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+test "Main db not affected when fail to diskless load" {
+ set master [Rn 0]
+ set replica [Rn 1]
+ set master_id 0
+ set replica_id 1
+
+ $replica READONLY
+ $replica config set repl-diskless-load swapdb
+ $replica config set appendonly no
+ $replica config set save ""
+ $replica config rewrite
+ $master config set repl-backlog-size 1024
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ $master config set rdb-key-save-delay 10000
+ $master config set rdbcompression no
+ $master config set appendonly no
+ $master config set save ""
+
+ # Write a key that belongs to slot 0
+ set slot0_key "06S"
+ $master set $slot0_key 1
+ wait_for_ofs_sync $master $replica
+ assert_equal {1} [$replica get $slot0_key]
+ assert_equal $slot0_key [$replica CLUSTER GETKEYSINSLOT 0 1]
+
+ # Save an RDB and kill the replica
+ $replica save
+ kill_instance redis $replica_id
+
+ # Delete the key from master
+ $master del $slot0_key
+
+ # The replica must perform a full sync with the master when it starts,
+ # because the replication backlog size is very small and dumping the RDB
+ # will take several seconds.
+ set num 10000
+ set value [string repeat A 1024]
+ set rd [redis_deferring_client redis $master_id]
+ for {set j 0} {$j < $num} {incr j} {
+ $rd set $j $value
+ }
+ for {set j 0} {$j < $num} {incr j} {
+ $rd read
+ }
+
+ # Start the replica again
+ restart_instance redis $replica_id
+ $replica READONLY
+
+ # Start the full sync, and wait until the DB has started loading in the background
+ wait_for_condition 500 10 {
+ [s $replica_id async_loading] eq 1
+ } else {
+ fail "Fail to full sync"
+ }
+
+ # Kill master, abort full sync
+ kill_instance redis $master_id
+
+ # The full sync was aborted; wait till the replica detects the disconnection
+ wait_for_condition 500 10 {
+ [s $replica_id async_loading] eq 0
+ } else {
+ fail "Fail to full sync"
+ }
+
+ # Both the replica's keys and its keys-to-slots map are still correct
+ assert_equal {1} [$replica get $slot0_key]
+ assert_equal $slot0_key [$replica CLUSTER GETKEYSINSLOT 0 1]
+}
diff --git a/tests/cluster/tests/18-info.tcl b/tests/cluster/tests/18-info.tcl
new file mode 100644
index 0000000..68c62d3
--- /dev/null
+++ b/tests/cluster/tests/18-info.tcl
@@ -0,0 +1,45 @@
+# Check cluster info stats
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a primary with a replica" {
+ create_cluster 2 0
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+set primary1 [Rn 0]
+set primary2 [Rn 1]
+
+proc cmdstat {instance cmd} {
+ return [cmdrstat $cmd $instance]
+}
+
+proc errorstat {instance cmd} {
+ return [errorrstat $cmd $instance]
+}
+
+test "errorstats: rejected call due to MOVED Redirection" {
+ $primary1 config resetstat
+ $primary2 config resetstat
+ assert_match {} [errorstat $primary1 MOVED]
+ assert_match {} [errorstat $primary2 MOVED]
+ # we know that one will have a MOVED reply and one will succeed
+ catch {$primary1 set key b} replyP1
+ catch {$primary2 set key b} replyP2
+ # sort servers so we know which one failed
+ if {$replyP1 eq {OK}} {
+ assert_match {MOVED*} $replyP2
+ set pok $primary1
+ set perr $primary2
+ } else {
+ assert_match {MOVED*} $replyP1
+ set pok $primary2
+ set perr $primary1
+ }
+ assert_match {} [errorstat $pok MOVED]
+ assert_match {*count=1*} [errorstat $perr MOVED]
+ assert_match {*calls=0,*,rejected_calls=1,failed_calls=0} [cmdstat $perr set]
+}
diff --git a/tests/cluster/tests/19-cluster-nodes-slots.tcl b/tests/cluster/tests/19-cluster-nodes-slots.tcl
new file mode 100644
index 0000000..77faec9
--- /dev/null
+++ b/tests/cluster/tests/19-cluster-nodes-slots.tcl
@@ -0,0 +1,50 @@
+# Check the CLUSTER NODES command after the optimization that generates the slot topology of all nodes up front
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 2 nodes cluster" {
+ cluster_create_with_continuous_slots 2 2
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+set master1 [Rn 0]
+set master2 [Rn 1]
+
+test "Continuous slots distribution" {
+ assert_match "* 0-8191*" [$master1 CLUSTER NODES]
+ assert_match "* 8192-16383*" [$master2 CLUSTER NODES]
+ assert_match "*0 8191*" [$master1 CLUSTER SLOTS]
+ assert_match "*8192 16383*" [$master2 CLUSTER SLOTS]
+
+ $master1 CLUSTER DELSLOTS 4096
+ assert_match "* 0-4095 4097-8191*" [$master1 CLUSTER NODES]
+ assert_match "*0 4095*4097 8191*" [$master1 CLUSTER SLOTS]
+
+
+ $master2 CLUSTER DELSLOTS 12288
+ assert_match "* 8192-12287 12289-16383*" [$master2 CLUSTER NODES]
+ assert_match "*8192 12287*12289 16383*" [$master2 CLUSTER SLOTS]
+}
+
+test "Discontinuous slots distribution" {
+ # Remove middle slots
+ $master1 CLUSTER DELSLOTS 4092 4094
+ assert_match "* 0-4091 4093 4095 4097-8191*" [$master1 CLUSTER NODES]
+ assert_match "*0 4091*4093 4093*4095 4095*4097 8191*" [$master1 CLUSTER SLOTS]
+ $master2 CLUSTER DELSLOTS 12284 12286
+ assert_match "* 8192-12283 12285 12287 12289-16383*" [$master2 CLUSTER NODES]
+ assert_match "*8192 12283*12285 12285*12287 12287*12289 16383*" [$master2 CLUSTER SLOTS]
+
+ # Remove head slots
+ $master1 CLUSTER DELSLOTS 0 2
+ assert_match "* 1 3-4091 4093 4095 4097-8191*" [$master1 CLUSTER NODES]
+ assert_match "*1 1*3 4091*4093 4093*4095 4095*4097 8191*" [$master1 CLUSTER SLOTS]
+
+ # Remove tail slots
+ $master2 CLUSTER DELSLOTS 16380 16382 16383
+ assert_match "* 8192-12283 12285 12287 12289-16379 16381*" [$master2 CLUSTER NODES]
+ assert_match "*8192 12283*12285 12285*12287 12287*12289 16379*16381 16381*" [$master2 CLUSTER SLOTS]
+}
diff --git a/tests/cluster/tests/20-half-migrated-slot.tcl b/tests/cluster/tests/20-half-migrated-slot.tcl
new file mode 100644
index 0000000..229b3a8
--- /dev/null
+++ b/tests/cluster/tests/20-half-migrated-slot.tcl
@@ -0,0 +1,98 @@
+# Tests for fixing migrating slot at all stages:
+# 1. when migration is half inited on "migrating" node
+# 2. when migration is half inited on "importing" node
+# 3. migration inited, but not finished
+# 4. migration is half finished on "migrating" node
+# 5. migration is half finished on "importing" node
+
+# TODO: Test is currently disabled until it is stabilized (fixing the test
+# itself or real issues in Redis).
+
+if {false} {
+source "../tests/includes/init-tests.tcl"
+source "../tests/includes/utils.tcl"
+
+test "Create a 2 nodes cluster" {
+ create_cluster 2 0
+ config_set_all_nodes cluster-allow-replica-migration no
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+catch {unset nodefrom}
+catch {unset nodeto}
+
+proc reset_cluster {} {
+ uplevel 1 {
+ $cluster refresh_nodes_map
+ array set nodefrom [$cluster masternode_for_slot 609]
+ array set nodeto [$cluster masternode_notfor_slot 609]
+ }
+}
+
+reset_cluster
+
+$cluster set aga xyz
+
+test "Half init migration in 'migrating' is fixable" {
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Half init migration in 'importing' is fixable" {
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Init migration and move key" {
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ assert_equal {OK} [$nodefrom(link) migrate $nodeto(host) $nodeto(port) aga 0 10000]
+ wait_for_cluster_propagation
+ assert_equal "xyz" [$cluster get aga]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+reset_cluster
+
+test "Move key again" {
+ wait_for_cluster_propagation
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ assert_equal {OK} [$nodefrom(link) migrate $nodeto(host) $nodeto(port) aga 0 10000]
+ wait_for_cluster_propagation
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Half-finish migration" {
+ # half finish migration on 'migrating' node
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 node $nodeto(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+reset_cluster
+
+test "Move key back" {
+ # 'aga' key is in 609 slot
+ assert_equal {OK} [$nodefrom(link) cluster setslot 609 migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 importing $nodefrom(id)]
+ assert_equal {OK} [$nodefrom(link) migrate $nodeto(host) $nodeto(port) aga 0 10000]
+ assert_equal "xyz" [$cluster get aga]
+}
+
+test "Half-finish importing" {
+ # Now we half finish 'importing' node
+ assert_equal {OK} [$nodeto(link) cluster setslot 609 node $nodeto(id)]
+ fix_cluster $nodefrom(addr)
+ assert_equal "xyz" [$cluster get aga]
+}
+
+config_set_all_nodes cluster-allow-replica-migration yes
+}
diff --git a/tests/cluster/tests/21-many-slot-migration.tcl b/tests/cluster/tests/21-many-slot-migration.tcl
new file mode 100644
index 0000000..1ac73dc
--- /dev/null
+++ b/tests/cluster/tests/21-many-slot-migration.tcl
@@ -0,0 +1,64 @@
+# Tests for many simultaneous migrations.
+
+# TODO: Test is currently disabled until it is stabilized (fixing the test
+# itself or real issues in Redis).
+
+if {false} {
+
+source "../tests/includes/init-tests.tcl"
+source "../tests/includes/utils.tcl"
+
+# TODO: This test currently runs without replicas, as failovers (which may
+# happen on lower-end CI platforms) are still not handled properly by the
+# cluster during slot migration (related to #6339).
+
+test "Create a 10 nodes cluster" {
+ create_cluster 10 0
+ config_set_all_nodes cluster-allow-replica-migration no
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+catch {unset nodefrom}
+catch {unset nodeto}
+
+$cluster refresh_nodes_map
+
+test "Set many keys" {
+ for {set i 0} {$i < 40000} {incr i} {
+ $cluster set key:$i val:$i
+ }
+}
+
+test "Keys are accessible" {
+ for {set i 0} {$i < 40000} {incr i} {
+ assert { [$cluster get key:$i] eq "val:$i" }
+ }
+}
+
+test "Init migration of many slots" {
+ for {set slot 0} {$slot < 1000} {incr slot} {
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ $nodefrom(link) cluster setslot $slot migrating $nodeto(id)
+ $nodeto(link) cluster setslot $slot importing $nodefrom(id)
+ }
+}
+
+test "Fix cluster" {
+ wait_for_cluster_propagation
+ fix_cluster $nodefrom(addr)
+}
+
+test "Keys are accessible" {
+ for {set i 0} {$i < 40000} {incr i} {
+ assert { [$cluster get key:$i] eq "val:$i" }
+ }
+}
+
+config_set_all_nodes cluster-allow-replica-migration yes
+}
diff --git a/tests/cluster/tests/22-replica-in-sync.tcl b/tests/cluster/tests/22-replica-in-sync.tcl
new file mode 100644
index 0000000..b5645aa
--- /dev/null
+++ b/tests/cluster/tests/22-replica-in-sync.tcl
@@ -0,0 +1,146 @@
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 1 node cluster" {
+ create_cluster 1 0
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+test "Cluster is writable" {
+ cluster_write_test 0
+}
+
+proc is_in_slots {master_id replica} {
+ set slots [R $master_id cluster slots]
+ set found_position [string first $replica $slots]
+ set result [expr {$found_position != -1}]
+ return $result
+}
+
+proc is_replica_online {info_repl} {
+ set found_position [string first "state=online" $info_repl]
+ set result [expr {$found_position != -1}]
+ return $result
+}
+
+proc get_last_pong_time {node_id target_cid} {
+ foreach item [split [R $node_id cluster nodes] \n] {
+ set args [split $item " "]
+ if {[lindex $args 0] eq $target_cid} {
+ return [lindex $args 5]
+ }
+ }
+ fail "Target node ID was not present"
+}
+
+set master_id 0
+
+test "Fill up primary with data" {
+ # Set 1 MB of data
+ R $master_id debug populate 1000 key 1000
+}
+
+test "Add new node as replica" {
+ set replica_id 1
+ set replica [R $replica_id CLUSTER MYID]
+ R $replica_id cluster replicate [R $master_id CLUSTER MYID]
+}
+
+test "Check digest and replica state" {
+ wait_for_condition 1000 50 {
+ [is_in_slots $master_id $replica]
+ } else {
+ fail "New replica didn't appear in the slots"
+ }
+
+ wait_for_condition 100 50 {
+ [is_replica_online [R $master_id info replication]]
+ } else {
+ fail "Replica is down for too long"
+ }
+ set replica_digest [R $replica_id debug digest]
+ assert {$replica_digest ne 0}
+}
+
+test "Replica in loading state is hidden" {
+ # Kill replica client for master and load new data to the primary
+ R $master_id config set repl-backlog-size 100
+
+ # Set the key load delay so that it will take at least
+ # 2 seconds to fully load the data.
+ R $replica_id config set key-load-delay 4000
+
+ # Trigger event loop processing every 1024 bytes. This allows us to send
+ # and receive cluster messages while loading, so we set it low to make
+ # the cluster messages more frequent.
+ R $replica_id config set loading-process-events-interval-bytes 1024
+
+ R $master_id multi
+ R $master_id client kill type replica
+ set num 100
+ set value [string repeat A 1024]
+ for {set j 0} {$j < $num} {incr j} {
+ set key "{0}"
+ append key $j
+ R $master_id set $key $value
+ }
+ R $master_id exec
+
+ # The master will be the last to know the replica
+ # is loading, so we will wait on that and assert
+ # the replica is loading afterwards.
+ wait_for_condition 100 50 {
+ ![is_in_slots $master_id $replica]
+ } else {
+ fail "Replica was always present in cluster slots"
+ }
+ assert_equal 1 [s $replica_id loading]
+
+ # Wait for the replica to finish full-sync and become online
+ wait_for_condition 200 50 {
+ [s $replica_id master_link_status] eq "up"
+ } else {
+ fail "Replica didn't finish loading"
+ }
+
+ # Return configs to default values
+ R $replica_id config set loading-process-events-interval-bytes 2097152
+ R $replica_id config set key-load-delay 0
+
+ # Check replica is back in cluster slots
+ wait_for_condition 100 50 {
+ [is_in_slots $master_id $replica]
+ } else {
+ fail "Replica is not back to slots"
+ }
+ assert_equal 1 [is_in_slots $replica_id $replica]
+}
+
+test "Check disconnected replica not hidden from slots" {
+ # We want to disconnect the replica, but keep it alive so it can still gossip
+
+ # Make sure that the replica will not be able to re-connect to the master
+ R $master_id config set requirepass asdf
+
+ # Disconnect replica from primary
+ R $master_id client kill type replica
+
+ # Check master to have no replicas
+ assert {[s $master_id connected_slaves] == 0}
+
+ set replica_cid [R $replica_id cluster myid]
+ set initial_pong [get_last_pong_time $master_id $replica_cid]
+ wait_for_condition 50 100 {
+ $initial_pong != [get_last_pong_time $master_id $replica_cid]
+ } else {
+ fail "Primary never received gossip from replica"
+ }
+
+ # Check that replica is still in the cluster slots
+ assert {[is_in_slots $master_id $replica]}
+
+ # undo config
+ R $master_id config set requirepass ""
+}
diff --git a/tests/cluster/tests/25-pubsubshard-slot-migration.tcl b/tests/cluster/tests/25-pubsubshard-slot-migration.tcl
new file mode 100644
index 0000000..0f59ffe
--- /dev/null
+++ b/tests/cluster/tests/25-pubsubshard-slot-migration.tcl
@@ -0,0 +1,171 @@
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 3 nodes cluster" {
+ cluster_create_with_continuous_slots 3 3
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+
+test "Migrate a slot, verify client receives sunsubscribe on primary serving the slot." {
+
+ # Setup the to and from node
+ set channelname mychannel
+ set slot [$cluster cluster keyslot $channelname]
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ set subscribeclient [redis_deferring_client_by_addr $nodefrom(host) $nodefrom(port)]
+
+ $subscribeclient deferred 1
+ $subscribeclient ssubscribe $channelname
+ $subscribeclient read
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot importing $nodefrom(id)]
+
+ # Verify subscribe is still valid, able to receive messages.
+ $nodefrom(link) spublish $channelname hello
+ assert_equal {smessage mychannel hello} [$subscribeclient read]
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)]
+
+ set msg [$subscribeclient read]
+ assert {"sunsubscribe" eq [lindex $msg 0]}
+ assert {$channelname eq [lindex $msg 1]}
+ assert {"0" eq [lindex $msg 2]}
+
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)]
+
+ $subscribeclient close
+}
+
+test "Client subscribes to multiple channels, migrate a slot, verify client receives sunsubscribe on primary serving the slot." {
+
+ # Setup the to and from node
+ set channelname ch3
+ set anotherchannelname ch7
+ set slot [$cluster cluster keyslot $channelname]
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ set subscribeclient [redis_deferring_client_by_addr $nodefrom(host) $nodefrom(port)]
+
+ $subscribeclient deferred 1
+ $subscribeclient ssubscribe $channelname
+ $subscribeclient read
+
+ $subscribeclient ssubscribe $anotherchannelname
+ $subscribeclient read
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot importing $nodefrom(id)]
+
+ # Verify subscribe is still valid, able to receive messages.
+ $nodefrom(link) spublish $channelname hello
+ assert_equal {smessage ch3 hello} [$subscribeclient read]
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)]
+
+ # Verify the client receives sunsubscribe message for the channel(slot) which got migrated.
+ set msg [$subscribeclient read]
+ assert {"sunsubscribe" eq [lindex $msg 0]}
+ assert {$channelname eq [lindex $msg 1]}
+ assert {"1" eq [lindex $msg 2]}
+
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)]
+
+ $nodefrom(link) spublish $anotherchannelname hello
+
+ # Verify the client is still connected and receives message from the other channel.
+ set msg [$subscribeclient read]
+ assert {"smessage" eq [lindex $msg 0]}
+ assert {$anotherchannelname eq [lindex $msg 1]}
+ assert {"hello" eq [lindex $msg 2]}
+
+ $subscribeclient close
+}
+
+test "Migrate a slot, verify client receives sunsubscribe on replica serving the slot." {
+
+ # Setup the to and from node
+ set channelname mychannel1
+ set slot [$cluster cluster keyslot $channelname]
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ # Get replica node serving slot (mychannel) to connect a client.
+ set replicanodeinfo [$cluster cluster replicas $nodefrom(id)]
+ set args [split $replicanodeinfo " "]
+ set addr [lindex [split [lindex $args 1] @] 0]
+ set replicahost [lindex [split $addr :] 0]
+ set replicaport [lindex [split $addr :] 1]
+ set subscribeclient [redis_deferring_client_by_addr $replicahost $replicaport]
+
+ $subscribeclient deferred 1
+ $subscribeclient ssubscribe $channelname
+ $subscribeclient read
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot migrating $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot importing $nodefrom(id)]
+
+ # Verify subscribe is still valid, able to receive messages.
+ $nodefrom(link) spublish $channelname hello
+ assert_equal {smessage mychannel1 hello} [$subscribeclient read]
+
+ assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)]
+ assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)]
+
+ set msg [$subscribeclient read]
+ assert {"sunsubscribe" eq [lindex $msg 0]}
+ assert {$channelname eq [lindex $msg 1]}
+ assert {"0" eq [lindex $msg 2]}
+
+ $subscribeclient close
+}
+
+test "Delete a slot, verify sunsubscribe message" {
+ set channelname ch2
+ set slot [$cluster cluster keyslot $channelname]
+
+ array set primary_client [$cluster masternode_for_slot $slot]
+
+ set subscribeclient [redis_deferring_client_by_addr $primary_client(host) $primary_client(port)]
+ $subscribeclient deferred 1
+ $subscribeclient ssubscribe $channelname
+ $subscribeclient read
+
+ $primary_client(link) cluster DELSLOTS $slot
+
+ set msg [$subscribeclient read]
+ assert {"sunsubscribe" eq [lindex $msg 0]}
+ assert {$channelname eq [lindex $msg 1]}
+ assert {"0" eq [lindex $msg 2]}
+
+ $subscribeclient close
+}
+
+test "Reset cluster, verify sunsubscribe message" {
+ set channelname ch4
+ set slot [$cluster cluster keyslot $channelname]
+
+ array set primary_client [$cluster masternode_for_slot $slot]
+
+ set subscribeclient [redis_deferring_client_by_addr $primary_client(host) $primary_client(port)]
+ $subscribeclient deferred 1
+ $subscribeclient ssubscribe $channelname
+ $subscribeclient read
+
+ $cluster cluster reset HARD
+
+ set msg [$subscribeclient read]
+ assert {"sunsubscribe" eq [lindex $msg 0]}
+ assert {$channelname eq [lindex $msg 1]}
+ assert {"0" eq [lindex $msg 2]}
+
+ $cluster close
+ $subscribeclient close
+} \ No newline at end of file
diff --git a/tests/cluster/tests/26-pubsubshard.tcl b/tests/cluster/tests/26-pubsubshard.tcl
new file mode 100644
index 0000000..2619eda
--- /dev/null
+++ b/tests/cluster/tests/26-pubsubshard.tcl
@@ -0,0 +1,94 @@
+# Test PUBSUB shard propagation in a cluster slot.
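+#
+# Context (sketch): sharded Pub/Sub (SSUBSCRIBE/SPUBLISH) routes by the
+# channel's hash slot, so both commands must target the shard serving that
+# slot and other shards reply -MOVED, unlike regular SUBSCRIBE/PUBLISH which
+# works cluster-wide.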
+
+source "../tests/includes/init-tests.tcl"
+
+test "Create a 3 nodes cluster" {
+ cluster_create_with_continuous_slots 3 3
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+test "Pub/Sub shard basics" {
+
+ set slot [$cluster cluster keyslot "channel.0"]
+ array set publishnode [$cluster masternode_for_slot $slot]
+ array set notshardnode [$cluster masternode_notfor_slot $slot]
+
+ set publishclient [redis_client_by_addr $publishnode(host) $publishnode(port)]
+ set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
+ set subscribeclient2 [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
+ set anotherclient [redis_deferring_client_by_addr $notshardnode(host) $notshardnode(port)]
+
+ $subscribeclient ssubscribe channel.0
+ $subscribeclient read
+
+ $subscribeclient2 ssubscribe channel.0
+ $subscribeclient2 read
+
+ $anotherclient ssubscribe channel.0
+ catch {$anotherclient read} err
+ assert_match {MOVED *} $err
+
+ set data [randomValue]
+ $publishclient spublish channel.0 $data
+
+ set msg [$subscribeclient read]
+ assert_equal $data [lindex $msg 2]
+
+ set msg [$subscribeclient2 read]
+ assert_equal $data [lindex $msg 2]
+
+ $publishclient close
+ $subscribeclient close
+ $subscribeclient2 close
+ $anotherclient close
+}
+
+test "client can't subscribe to multiple shard channels across different slots in same call" {
+ catch {$cluster ssubscribe channel.0 channel.1} err
+ assert_match {CROSSSLOT Keys*} $err
+}
+
+test "client can subscribe to multiple shard channels across different slots in separate call" {
+ $cluster ssubscribe ch3
+ $cluster ssubscribe ch7
+
+ $cluster sunsubscribe ch3
+ $cluster sunsubscribe ch7
+}
+
+
+test "Verify Pub/Sub and Pub/Sub shard no overlap" {
+ set slot [$cluster cluster keyslot "channel.0"]
+ array set publishnode [$cluster masternode_for_slot $slot]
+ array set notshardnode [$cluster masternode_notfor_slot $slot]
+
+ set publishshardclient [redis_client_by_addr $publishnode(host) $publishnode(port)]
+ set publishclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
+ set subscribeshardclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
+ set subscribeclient [redis_deferring_client_by_addr $publishnode(host) $publishnode(port)]
+
+ $subscribeshardclient deferred 1
+ $subscribeshardclient ssubscribe channel.0
+ $subscribeshardclient read
+
+ $subscribeclient deferred 1
+ $subscribeclient subscribe channel.0
+ $subscribeclient read
+
+ set sharddata "testingpubsubdata"
+ $publishshardclient spublish channel.0 $sharddata
+
+ set data "somemoredata"
+ $publishclient publish channel.0 $data
+
+ set msg [$subscribeshardclient read]
+ assert_equal $sharddata [lindex $msg 2]
+
+ set msg [$subscribeclient read]
+ assert_equal $data [lindex $msg 2]
+
+ $cluster close
+ $publishclient close
+ $subscribeclient close
+ $subscribeshardclient close
+} \ No newline at end of file
diff --git a/tests/cluster/tests/28-cluster-shards.tcl b/tests/cluster/tests/28-cluster-shards.tcl
new file mode 100644
index 0000000..f24b917
--- /dev/null
+++ b/tests/cluster/tests/28-cluster-shards.tcl
@@ -0,0 +1,287 @@
+source "../tests/includes/init-tests.tcl"
+
+# Initial slot distribution.
+set ::slot0 [list 0 1000 1002 5459 5461 5461 10926 10926]
+set ::slot1 [list 5460 5460 5462 10922 10925 10925]
+set ::slot2 [list 10923 10924 10927 16383]
+set ::slot3 [list 1001 1001]
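+# Each ::slotN list is a flat sequence of start/end pairs for ADDSLOTSRANGE;
+# e.g. ::slot0 covers ranges 0-1000 and 1002-5459 plus the single slots 5461
+# and 10926.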
+
+proc cluster_create_with_split_slots {masters replicas} {
+ for {set j 0} {$j < $masters} {incr j} {
+ R $j cluster ADDSLOTSRANGE {*}[set ::slot${j}]
+ }
+ if {$replicas} {
+ cluster_allocate_slaves $masters $replicas
+ }
+ set ::cluster_master_nodes $masters
+ set ::cluster_replica_nodes $replicas
+}
+
+# Get the node info with the specific node_id from the
+# given reference node. Valid type options are "node" and "shard"
+proc get_node_info_from_shard {id reference {type node}} {
+ set shards_response [R $reference CLUSTER SHARDS]
+ foreach shard_response $shards_response {
+ set nodes [dict get $shard_response nodes]
+ foreach node $nodes {
+ if {[dict get $node id] eq $id} {
+ if {$type eq "node"} {
+ return $node
+ } elseif {$type eq "shard"} {
+ return $shard_response
+ } else {
+ return {}
+ }
+ }
+ }
+ }
+ # No shard found, return nothing
+ return {}
+}
+
+proc cluster_ensure_master {id} {
+ if { [regexp "master" [R $id role]] == 0 } {
+ assert_equal {OK} [R $id CLUSTER FAILOVER]
+ wait_for_condition 50 100 {
+ [regexp "master" [R $id role]] == 1
+ } else {
+ fail "instance $id is not master"
+ }
+ }
+}
+
+test "Create a 8 nodes cluster with 4 shards" {
+ cluster_create_with_split_slots 4 4
+}
+
+test "Cluster should start ok" {
+ assert_cluster_state ok
+}
+
+test "Set cluster hostnames and verify they are propagated" {
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ R $j config set cluster-announce-hostname "host-$j.com"
+ }
+
+ # Wait for everyone to agree about the state
+ wait_for_cluster_propagation
+}
+
+test "Verify information about the shards" {
+ set ids {}
+ for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
+ lappend ids [R $j CLUSTER MYID]
+ }
+ set slots [list $::slot0 $::slot1 $::slot2 $::slot3 $::slot0 $::slot1 $::slot2 $::slot3]
+
+ # Verify that on each node (primary/replica) the response of the `CLUSTER SHARDS` command is consistent.
+ for {set ref 0} {$ref < $::cluster_master_nodes + $::cluster_replica_nodes} {incr ref} {
+ for {set i 0} {$i < $::cluster_master_nodes + $::cluster_replica_nodes} {incr i} {
+ assert_equal [lindex $slots $i] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "shard"] slots]
+ assert_equal "host-$i.com" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] hostname]
+ assert_equal "127.0.0.1" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] ip]
+ # Default value of 'cluster-preferred-endpoint-type' is ip.
+ assert_equal "127.0.0.1" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] endpoint]
+
+ if {$::tls} {
+ assert_equal [get_instance_attrib redis $i plaintext-port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
+ assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] tls-port]
+ } else {
+ assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
+ }
+
+ if {$i < 4} {
+ assert_equal "master" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
+ assert_equal "online" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] health]
+ } else {
+ assert_equal "replica" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
+ # Replica could be in online or loading
+ }
+ }
+ }
+}
+
+test "Verify no slot shard" {
+ # Node 8 has no slots assigned
+ set node_8_id [R 8 CLUSTER MYID]
+ assert_equal {} [dict get [get_node_info_from_shard $node_8_id 8 "shard"] slots]
+ assert_equal {} [dict get [get_node_info_from_shard $node_8_id 0 "shard"] slots]
+}
+
+set node_0_id [R 0 CLUSTER MYID]
+
+test "Kill a node and tell the replica to immediately takeover" {
+ kill_instance redis 0
+ R 4 cluster failover force
+}
+
+# Primary node 0 should be reported as failed; wait until the new primary acknowledges it.
+test "Verify health as fail for killed node" {
+ wait_for_condition 50 100 {
+ "fail" eq [dict get [get_node_info_from_shard $node_0_id 4 "node"] "health"]
+ } else {
+ fail "New primary never detected the node failed"
+ }
+}
+
+set primary_id 4
+set replica_id 0
+
+test "Restarting primary node" {
+ restart_instance redis $replica_id
+}
+
+test "Instance #0 gets converted into a replica" {
+ wait_for_condition 1000 50 {
+ [RI $replica_id role] eq {slave}
+ } else {
+ fail "Old primary was not converted into replica"
+ }
+}
+
+test "Test the replica reports a loading state while it's loading" {
+ # First verify the command reports everything in a happy state
+ set replica_cluster_id [R $replica_id CLUSTER MYID]
+ wait_for_condition 50 1000 {
+ [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health] eq "online"
+ } else {
+ fail "Replica never transitioned to online"
+ }
+
+ # Set 1 MB of data, so there is something to load on full sync
+ R $primary_id debug populate 1000 key 1000
+
+    # Shrink the replication backlog so that, when the replica link is
+    # killed below, the replica is forced into a full sync.
+    R $primary_id config set repl-backlog-size 100
+
+ # Set the key load delay so that it will take at least
+ # 2 seconds to fully load the data.
+ R $replica_id config set key-load-delay 4000
+
+    # Trigger event loop processing every 1024 bytes. This allows cluster
+    # messages to be sent and received while the replica is loading, so we
+    # set it low to exchange cluster messages more frequently.
+ R $replica_id config set loading-process-events-interval-bytes 1024
+
+ R $primary_id multi
+ R $primary_id client kill type replica
+    # Populate new data that the replica will have to full-sync
+ set num 100
+ set value [string repeat A 1024]
+ for {set j 0} {$j < $num} {incr j} {
+ # Use hashtag valid for shard #0
+ set key "{ch3}$j"
+ R $primary_id set $key $value
+ }
+ R $primary_id exec
+
+    # The replica should reconnect and start a full sync; it will gossip about its health to the primary.
+ wait_for_condition 50 1000 {
+ "loading" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
+ } else {
+ fail "Replica never transitioned to loading"
+ }
+
+    # Verify the CLUSTER SHARDS and (deprecated) CLUSTER SLOTS APIs respond while the node is loading data.
+ R $replica_id CLUSTER SHARDS
+ R $replica_id CLUSTER SLOTS
+
+ # Speed up the key loading and verify everything resumes
+ R $replica_id config set key-load-delay 0
+
+ wait_for_condition 50 1000 {
+ "online" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
+ } else {
+ fail "Replica never transitioned to online"
+ }
+
+    # Final sanity check: the replica itself agrees it is online.
+ assert_equal "online" [dict get [get_node_info_from_shard $replica_cluster_id $replica_id "node"] health]
+}
+
+test "Regression test for a crash when calling SHARDS during handshake" {
+    # Reset and forget a node, so we can use it to establish handshake connections
+ set id [R 19 CLUSTER MYID]
+ R 19 CLUSTER RESET HARD
+ for {set i 0} {$i < 19} {incr i} {
+ R $i CLUSTER FORGET $id
+ }
+ R 19 cluster meet 127.0.0.1 [get_instance_attrib redis 0 port]
+    # This line would previously crash, since all the outbound
+    # connections were in handshake state.
+ R 19 CLUSTER SHARDS
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+test "Shard ids are unique" {
+ set shard_ids {}
+ for {set i 0} {$i < 4} {incr i} {
+ set shard_id [R $i cluster myshardid]
+ assert_equal [dict exists $shard_ids $shard_id] 0
+ dict set shard_ids $shard_id 1
+ }
+}
+
+test "CLUSTER MYSHARDID reports same id for both primary and replica" {
+ for {set i 0} {$i < 4} {incr i} {
+ assert_equal [R $i cluster myshardid] [R [expr $i+4] cluster myshardid]
+ assert_equal [string length [R $i cluster myshardid]] 40
+ }
+}
+
+test "New replica receives primary's shard id" {
+    # Find a primary
+ set id 0
+ for {} {$id < 8} {incr id} {
+ if {[regexp "master" [R $id role]]} {
+ break
+ }
+ }
+ assert_not_equal [R 8 cluster myshardid] [R $id cluster myshardid]
+ assert_equal {OK} [R 8 cluster replicate [R $id cluster myid]]
+ assert_equal [R 8 cluster myshardid] [R $id cluster myshardid]
+}
+
+test "CLUSTER MYSHARDID reports same shard id after shard restart" {
+ set node_ids {}
+ for {set i 0} {$i < 8} {incr i 4} {
+ dict set node_ids $i [R $i cluster myshardid]
+ kill_instance redis $i
+ wait_for_condition 50 100 {
+ [instance_is_killed redis $i]
+ } else {
+ fail "instance $i is not killed"
+ }
+ }
+ for {set i 0} {$i < 8} {incr i 4} {
+ restart_instance redis $i
+ }
+ assert_cluster_state ok
+ for {set i 0} {$i < 8} {incr i 4} {
+ assert_equal [dict get $node_ids $i] [R $i cluster myshardid]
+ }
+}
+
+test "CLUSTER MYSHARDID reports same shard id after cluster restart" {
+ set node_ids {}
+ for {set i 0} {$i < 8} {incr i} {
+ dict set node_ids $i [R $i cluster myshardid]
+ }
+ for {set i 0} {$i < 8} {incr i} {
+ kill_instance redis $i
+ wait_for_condition 50 100 {
+ [instance_is_killed redis $i]
+ } else {
+ fail "instance $i is not killed"
+ }
+ }
+ for {set i 0} {$i < 8} {incr i} {
+ restart_instance redis $i
+ }
+ assert_cluster_state ok
+ for {set i 0} {$i < 8} {incr i} {
+ assert_equal [dict get $node_ids $i] [R $i cluster myshardid]
+ }
+}
diff --git a/tests/cluster/tests/29-slot-migration-response.tcl b/tests/cluster/tests/29-slot-migration-response.tcl
new file mode 100644
index 0000000..060cc8d
--- /dev/null
+++ b/tests/cluster/tests/29-slot-migration-response.tcl
@@ -0,0 +1,50 @@
+# Tests for the response of slot migrations.
+
+source "../tests/includes/init-tests.tcl"
+source "../tests/includes/utils.tcl"
+
+test "Create a 2 nodes cluster" {
+ create_cluster 2 0
+ config_set_all_nodes cluster-allow-replica-migration no
+}
+
+test "Cluster is up" {
+ assert_cluster_state ok
+}
+
+set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
+catch {unset nodefrom}
+catch {unset nodeto}
+
+$cluster refresh_nodes_map
+
+test "Set many keys in the cluster" {
+ for {set i 0} {$i < 5000} {incr i} {
+ $cluster set $i $i
+ assert { [$cluster get $i] eq $i }
+ }
+}
+
+test "Test cluster responses during migration of slot x" {
+
+ set slot 10
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+
+ $nodeto(link) cluster setslot $slot importing $nodefrom(id)
+ $nodefrom(link) cluster setslot $slot migrating $nodeto(id)
+
+ # Get a key from that slot
+ set key [$nodefrom(link) cluster GETKEYSINSLOT $slot "1"]
+
+    # MOVED reply: the importing node does not own the slot yet, so a direct write is redirected to the current owner.
+ assert_error "*MOVED*" {$nodeto(link) set $key "newVal"}
+
+    # ASK reply: the key does not exist on the migrating node, so the client is asked to retry on the importing node.
+ assert_error "*ASK*" {$nodefrom(link) set "abc{$key}" "newVal"}
+
+    # TRYAGAIN reply: a multi-key operation finds only part of its keys while the slot is being migrated.
+ assert_error "*TRYAGAIN*" {$nodefrom(link) mset "a{$key}" "newVal" $key "newVal2"}
+}
+
+config_set_all_nodes cluster-allow-replica-migration yes
diff --git a/tests/cluster/tests/helpers/onlydots.tcl b/tests/cluster/tests/helpers/onlydots.tcl
new file mode 100644
index 0000000..4a6d1ae
--- /dev/null
+++ b/tests/cluster/tests/helpers/onlydots.tcl
@@ -0,0 +1,16 @@
+# Reads the standard input and shows only the dots in the output, filtering
+# out all the other characters. Designed to avoid buffering so that when we
+# get the output of redis-trib and want to show just the dots, we see the
+# dots as soon as redis-trib outputs them.
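+#
+# A hypothetical usage, piping a redis-trib run through this filter:
+#
+#   redis-trib reshard ... | tclsh onlydots.tcl
+#
+# Only the progress dots reach the terminal, and they appear unbuffered.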
+
+fconfigure stdin -buffering none
+
+while 1 {
+ set c [read stdin 1]
+ if {$c eq {}} {
+ exit 0; # EOF
+ } elseif {$c eq {.}} {
+ puts -nonewline .
+ flush stdout
+ }
+}
diff --git a/tests/cluster/tests/includes/init-tests.tcl b/tests/cluster/tests/includes/init-tests.tcl
new file mode 100644
index 0000000..4875a01
--- /dev/null
+++ b/tests/cluster/tests/includes/init-tests.tcl
@@ -0,0 +1,91 @@
+# Initialization tests -- most units start by including this.
+
+test "(init) Restart killed instances" {
+ foreach type {redis} {
+ foreach_${type}_id id {
+ if {[get_instance_attrib $type $id pid] == -1} {
+ puts -nonewline "$type/$id "
+ flush stdout
+ restart_instance $type $id
+ }
+ }
+ }
+}
+
+test "Cluster nodes are reachable" {
+ foreach_redis_id id {
+ # Every node should be reachable.
+ wait_for_condition 1000 50 {
+ ([catch {R $id ping} ping_reply] == 0) &&
+ ($ping_reply eq {PONG})
+ } else {
+ catch {R $id ping} err
+ fail "Node #$id keeps replying '$err' to PING."
+ }
+ }
+}
+
+test "Cluster nodes hard reset" {
+ foreach_redis_id id {
+ if {$::valgrind} {
+ set node_timeout 10000
+ } else {
+ set node_timeout 3000
+ }
+ catch {R $id flushall} ; # May fail for readonly slaves.
+ R $id MULTI
+ R $id cluster reset hard
+ R $id cluster set-config-epoch [expr {$id+1}]
+ R $id EXEC
+ R $id config set cluster-node-timeout $node_timeout
+ R $id config set cluster-slave-validity-factor 10
+ R $id config set loading-process-events-interval-bytes 2097152
+ R $id config set key-load-delay 0
+ R $id config set repl-diskless-load disabled
+ R $id config set cluster-announce-hostname ""
+ R $id DEBUG DROP-CLUSTER-PACKET-FILTER -1
+ R $id config rewrite
+ }
+}
+
+# Helper function to attempt to have each node in a cluster
+# meet each other.
+proc join_nodes_in_cluster {} {
+ # Join node 0 with 1, 1 with 2, ... and so forth.
+ # If auto-discovery works all nodes will know every other node
+ # eventually.
+ set ids {}
+ foreach_redis_id id {lappend ids $id}
+ for {set j 0} {$j < [expr [llength $ids]-1]} {incr j} {
+ set a [lindex $ids $j]
+ set b [lindex $ids [expr $j+1]]
+ set b_port [get_instance_attrib redis $b port]
+ R $a cluster meet 127.0.0.1 $b_port
+ }
+
+ foreach_redis_id id {
+ wait_for_condition 1000 50 {
+ [llength [get_cluster_nodes $id connected]] == [llength $ids]
+ } else {
+ return 0
+ }
+ }
+ return 1
+}
+
+test "Cluster Join and auto-discovery test" {
+ # Use multiple attempts since sometimes nodes timeout
+ # while attempting to connect.
+ for {set attempts 3} {$attempts > 0} {incr attempts -1} {
+ if {[join_nodes_in_cluster] == 1} {
+ break
+ }
+ }
+ if {$attempts == 0} {
+ fail "Cluster failed to form full mesh"
+ }
+}
+
+test "Before slots allocation, all nodes report cluster failure" {
+ assert_cluster_state fail
+}
diff --git a/tests/cluster/tests/includes/utils.tcl b/tests/cluster/tests/includes/utils.tcl
new file mode 100644
index 0000000..c1b0fe6
--- /dev/null
+++ b/tests/cluster/tests/includes/utils.tcl
@@ -0,0 +1,36 @@
+source "../../../tests/support/cli.tcl"
+
+proc config_set_all_nodes {keyword value} {
+ foreach_redis_id id {
+ R $id config set $keyword $value
+ }
+}
+
+proc fix_cluster {addr} {
+ set code [catch {
+ exec ../../../src/redis-cli {*}[rediscli_tls_config "../../../tests"] --cluster fix $addr << yes
+ } result]
+ if {$code != 0} {
+ puts "redis-cli --cluster fix returns non-zero exit code, output below:\n$result"
+ }
+ # Note: redis-cli --cluster fix may return a non-zero exit code if nodes don't agree,
+ # but we can ignore that and rely on the check below.
+ assert_cluster_state ok
+ wait_for_condition 100 100 {
+ [catch {exec ../../../src/redis-cli {*}[rediscli_tls_config "../../../tests"] --cluster check $addr} result] == 0
+ } else {
+ puts "redis-cli --cluster check returns non-zero exit code, output below:\n$result"
+ fail "Cluster could not settle with configuration"
+ }
+}
+
+proc wait_cluster_stable {} {
+ wait_for_condition 1000 50 {
+ [catch {exec ../../../src/redis-cli --cluster \
+ check 127.0.0.1:[get_instance_attrib redis 0 port] \
+ {*}[rediscli_tls_config "../../../tests"] \
+ }] == 0
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+}
\ No newline at end of file
diff --git a/tests/cluster/tmp/.gitignore b/tests/cluster/tmp/.gitignore
new file mode 100644
index 0000000..f581f73
--- /dev/null
+++ b/tests/cluster/tmp/.gitignore
@@ -0,0 +1,2 @@
+redis_*
+sentinel_*
diff --git a/tests/helpers/bg_block_op.tcl b/tests/helpers/bg_block_op.tcl
new file mode 100644
index 0000000..dc4e1a9
--- /dev/null
+++ b/tests/helpers/bg_block_op.tcl
@@ -0,0 +1,55 @@
+source tests/support/redis.tcl
+source tests/support/util.tcl
+
+set ::tlsdir "tests/tls"
+
+# This function sometimes writes and sometimes blocking-reads from lists/sorted
+# sets. There are multiple processes like this executing at the same time
+# so that we have some chance to trap some corner condition if there is
+# a regression. For this to happen it is important that we narrow the key
+# space to just a few elements, and balance the operations so that it is
+# unlikely that lists and zsets just get more data without ever causing
+# blocking.
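+#
+# A hypothetical standalone invocation (args are host port db ops tls;
+# values illustrative):
+#
+#   tclsh tests/helpers/bg_block_op.tcl 127.0.0.1 6379 9 10000 0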
+proc bg_block_op {host port db ops tls} {
+ set r [redis $host $port 0 $tls]
+ $r client setname LOAD_HANDLER
+ $r select $db
+
+ for {set j 0} {$j < $ops} {incr j} {
+
+ # List side
+ set k list_[randomInt 10]
+ set k2 list_[randomInt 10]
+ set v [randomValue]
+
+ randpath {
+ randpath {
+ $r rpush $k $v
+ } {
+ $r lpush $k $v
+ }
+ } {
+ $r blpop $k 2
+ } {
+ $r blpop $k $k2 2
+ }
+
+ # Zset side
+ set k zset_[randomInt 10]
+ set k2 zset_[randomInt 10]
+ set v1 [randomValue]
+ set v2 [randomValue]
+
+ randpath {
+            $r zadd $k [randomInt 10000] $v1
+ } {
+            $r zadd $k [randomInt 10000] $v1 [randomInt 10000] $v2
+ } {
+ $r bzpopmin $k 2
+ } {
+ $r bzpopmax $k 2
+ }
+ }
+}
+
+bg_block_op [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3] [lindex $argv 4]
diff --git a/tests/helpers/bg_complex_data.tcl b/tests/helpers/bg_complex_data.tcl
new file mode 100644
index 0000000..9c0044e
--- /dev/null
+++ b/tests/helpers/bg_complex_data.tcl
@@ -0,0 +1,13 @@
+source tests/support/redis.tcl
+source tests/support/util.tcl
+
+set ::tlsdir "tests/tls"
+
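+# Standalone helper that builds a complex dataset (via createComplexDataset
+# from util.tcl) against the given host, port and db. A hypothetical
+# invocation (args are host port db ops tls):
+#
+#   tclsh tests/helpers/bg_complex_data.tcl 127.0.0.1 6379 9 1000 0
+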
+proc bg_complex_data {host port db ops tls} {
+ set r [redis $host $port 0 $tls]
+ $r client setname LOAD_HANDLER
+ $r select $db
+ createComplexDataset $r $ops
+}
+
+bg_complex_data [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3] [lindex $argv 4]
diff --git a/tests/helpers/fake_redis_node.tcl b/tests/helpers/fake_redis_node.tcl
new file mode 100644
index 0000000..a12d87f
--- /dev/null
+++ b/tests/helpers/fake_redis_node.tcl
@@ -0,0 +1,58 @@
+# A fake Redis node for replaying predefined/expected traffic with a client.
+#
+# Usage: tclsh fake_redis_node.tcl PORT COMMAND REPLY [ COMMAND REPLY [ ... ] ]
+#
+# Commands are given as space-separated strings, e.g. "GET foo", and replies as
+# RESP-encoded replies minus the trailing \r\n, e.g. "+OK".
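+#
+# A hypothetical invocation serving one GET and then a PING:
+#
+#   tclsh tests/helpers/fake_redis_node.tcl 6400 "GET foo" "+OK" "PING" "+PONG"
+#
+# Each expected command is answered with its canned reply; anything else gets
+# an -ERR reply and the connection is closed.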
+
+set port [lindex $argv 0];
+set expected_traffic [lrange $argv 1 end];
+
+# Reads and parses a command from a socket and returns it as a space-separated
+# string, e.g. "set foo bar".
+proc read_command {sock} {
+ set char [read $sock 1]
+ switch $char {
+ * {
+ set numargs [gets $sock]
+ set result {}
+ for {set i 0} {$i<$numargs} {incr i} {
+ read $sock 1; # dollar sign
+ set len [gets $sock]
+ set str [read $sock $len]
+ gets $sock; # trailing \r\n
+ lappend result $str
+ }
+ return $result
+ }
+ {} {
+ # EOF
+ return {}
+ }
+ default {
+ # Non-RESP command
+ set rest [gets $sock]
+ return "$char$rest"
+ }
+ }
+}
+
+proc accept {sock host port} {
+ global expected_traffic
+ foreach {expect_cmd reply} $expected_traffic {
+ if {[eof $sock]} {break}
+ set cmd [read_command $sock]
+ if {[string equal -nocase $cmd $expect_cmd]} {
+ puts $sock $reply
+ flush $sock
+ } else {
+ puts $sock "-ERR unexpected command $cmd"
+ break
+ }
+ }
+ close $sock
+}
+
+socket -server accept $port
+after 5000 set done timeout
+vwait done
diff --git a/tests/helpers/gen_write_load.tcl b/tests/helpers/gen_write_load.tcl
new file mode 100644
index 0000000..568f5cd
--- /dev/null
+++ b/tests/helpers/gen_write_load.tcl
@@ -0,0 +1,18 @@
+source tests/support/redis.tcl
+
+set ::tlsdir "tests/tls"
+
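+# Writes random keys into db 9 for the given number of seconds, using a
+# deferring connection (the "1" passed to [redis]) so replies are not read.
+# A hypothetical invocation (args are host port seconds tls):
+#
+#   tclsh tests/helpers/gen_write_load.tcl 127.0.0.1 6379 10 0
+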
+proc gen_write_load {host port seconds tls} {
+ set start_time [clock seconds]
+ set r [redis $host $port 1 $tls]
+ $r client setname LOAD_HANDLER
+ $r select 9
+ while 1 {
+ $r set [expr rand()] [expr rand()]
+ if {[clock seconds]-$start_time > $seconds} {
+ exit 0
+ }
+ }
+}
+
+gen_write_load [lindex $argv 0] [lindex $argv 1] [lindex $argv 2] [lindex $argv 3]
diff --git a/tests/instances.tcl b/tests/instances.tcl
new file mode 100644
index 0000000..56a51a8
--- /dev/null
+++ b/tests/instances.tcl
@@ -0,0 +1,742 @@
+# Multi-instance test framework.
+# This is used in order to test Sentinel and Redis Cluster, and provides
+# basic capabilities for spawning and handling N parallel Redis / Sentinel
+# instances.
+#
+# Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
+# This software is released under the BSD License. See the COPYING file for
+# more information.
+
+package require Tcl 8.5
+
+set tcl_precision 17
+source ../support/redis.tcl
+source ../support/util.tcl
+source ../support/aofmanifest.tcl
+source ../support/server.tcl
+source ../support/test.tcl
+
+set ::verbose 0
+set ::valgrind 0
+set ::tls 0
+set ::tls_module 0
+set ::pause_on_error 0
+set ::dont_clean 0
+set ::simulate_error 0
+set ::failed 0
+set ::sentinel_instances {}
+set ::redis_instances {}
+set ::global_config {}
+set ::sentinel_base_port 20000
+set ::redis_base_port 30000
+set ::redis_port_count 1024
+set ::host "127.0.0.1"
+set ::leaked_fds_file [file normalize "tmp/leaked_fds.txt"]
+set ::pids {} ; # We kill everything at exit
+set ::dirs {} ; # We remove all the temp dirs at exit
+set ::run_matching {} ; # If non-empty, only tests matching the pattern are run.
+set ::stop_on_failure 0
+set ::loop 0
+
+if {[catch {cd tmp}]} {
+ puts "tmp directory not found."
+ puts "Please run this test from the Redis source root."
+ exit 1
+}
+
+# Execute the specified instance of the server specified by 'type', using
+# the provided configuration file. Returns the PID of the process.
+proc exec_instance {type dirname cfgfile} {
+ if {$type eq "redis"} {
+ set prgname redis-server
+ } elseif {$type eq "sentinel"} {
+ set prgname redis-sentinel
+ } else {
+ error "Unknown instance type."
+ }
+
+ set errfile [file join $dirname err.txt]
+ if {$::valgrind} {
+ set pid [exec valgrind --track-origins=yes --suppressions=../../../src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full ../../../src/${prgname} $cfgfile 2>> $errfile &]
+ } else {
+ set pid [exec ../../../src/${prgname} $cfgfile 2>> $errfile &]
+ }
+ return $pid
+}
+
+# Spawn a redis or sentinel instance, depending on 'type'.
+proc spawn_instance {type base_port count {conf {}} {base_conf_file ""}} {
+ for {set j 0} {$j < $count} {incr j} {
+ set port [find_available_port $base_port $::redis_port_count]
+ # plaintext port (only used for TLS cluster)
+ set pport 0
+ # Create a directory for this instance.
+ set dirname "${type}_${j}"
+ lappend ::dirs $dirname
+ catch {exec rm -rf $dirname}
+ file mkdir $dirname
+
+ # Write the instance config file.
+ set cfgfile [file join $dirname $type.conf]
+ if {$base_conf_file ne ""} {
+ file copy -- $base_conf_file $cfgfile
+ set cfg [open $cfgfile a+]
+ } else {
+ set cfg [open $cfgfile w]
+ }
+
+ if {$::tls} {
+ if {$::tls_module} {
+ puts $cfg [format "loadmodule %s/../../../src/redis-tls.so" [pwd]]
+ }
+
+ puts $cfg "tls-port $port"
+ puts $cfg "tls-replication yes"
+ puts $cfg "tls-cluster yes"
+ # plaintext port, only used by plaintext clients in a TLS cluster
+ set pport [find_available_port $base_port $::redis_port_count]
+ puts $cfg "port $pport"
+ puts $cfg [format "tls-cert-file %s/../../tls/server.crt" [pwd]]
+ puts $cfg [format "tls-key-file %s/../../tls/server.key" [pwd]]
+ puts $cfg [format "tls-client-cert-file %s/../../tls/client.crt" [pwd]]
+ puts $cfg [format "tls-client-key-file %s/../../tls/client.key" [pwd]]
+ puts $cfg [format "tls-dh-params-file %s/../../tls/redis.dh" [pwd]]
+ puts $cfg [format "tls-ca-cert-file %s/../../tls/ca.crt" [pwd]]
+ } else {
+ puts $cfg "port $port"
+ }
+
+ if {$::log_req_res} {
+ puts $cfg "req-res-logfile stdout.reqres"
+ }
+
+ if {$::force_resp3} {
+ puts $cfg "client-default-resp 3"
+ }
+
+ puts $cfg "repl-diskless-sync-delay 0"
+ puts $cfg "dir ./$dirname"
+ puts $cfg "logfile log.txt"
+    # Append any additional config directives
+ foreach directive $conf {
+ puts $cfg $directive
+ }
+ dict for {name val} $::global_config {
+ puts $cfg "$name $val"
+ }
+ close $cfg
+
+ # Finally exec it and remember the pid for later cleanup.
+ set retry 100
+ while {$retry} {
+ set pid [exec_instance $type $dirname $cfgfile]
+
+ # Check availability
+ if {[server_is_up 127.0.0.1 $port 100] == 0} {
+ puts "Starting $type #$j at port $port failed, try another"
+ incr retry -1
+ set port [find_available_port $base_port $::redis_port_count]
+ set cfg [open $cfgfile a+]
+ if {$::tls} {
+ puts $cfg "tls-port $port"
+ set pport [find_available_port $base_port $::redis_port_count]
+ puts $cfg "port $pport"
+ } else {
+ puts $cfg "port $port"
+ }
+ close $cfg
+ } else {
+ puts "Starting $type #$j at port $port"
+ lappend ::pids $pid
+ break
+ }
+ }
+
+    # Final availability check
+ if {[server_is_up $::host $port 100] == 0} {
+ set logfile [file join $dirname log.txt]
+ puts [exec tail $logfile]
+ abort_sentinel_test "Problems starting $type #$j: ping timeout, maybe server start failed, check $logfile"
+ }
+
+ # Push the instance into the right list
+ set link [redis $::host $port 0 $::tls]
+ $link reconnect 1
+ lappend ::${type}_instances [list \
+ pid $pid \
+ host $::host \
+ port $port \
+ plaintext-port $pport \
+ link $link \
+ ]
+ }
+}
+
+proc log_crashes {} {
+ set start_pattern {*REDIS BUG REPORT START*}
+ set logs [glob */log.txt]
+ foreach log $logs {
+ set fd [open $log]
+ set found 0
+ while {[gets $fd line] >= 0} {
+ if {[string match $start_pattern $line]} {
+ puts "\n*** Crash report found in $log ***"
+ set found 1
+ }
+ if {$found} {
+ puts $line
+ incr ::failed
+ }
+ }
+ }
+
+ set logs [glob */err.txt]
+ foreach log $logs {
+ set res [find_valgrind_errors $log true]
+ if {$res != ""} {
+ puts $res
+ incr ::failed
+ }
+ }
+
+ set logs [glob */err.txt]
+ foreach log $logs {
+ set res [sanitizer_errors_from_file $log]
+ if {$res != ""} {
+ puts $res
+ incr ::failed
+ }
+ }
+}
+
+proc is_alive pid {
+ if {[catch {exec ps -p $pid} err]} {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc stop_instance pid {
+ catch {exec kill $pid}
+ # Node might have been stopped in the test
+ catch {exec kill -SIGCONT $pid}
+ if {$::valgrind} {
+ set max_wait 120000
+ } else {
+ set max_wait 10000
+ }
+    set wait 0
+    while {[is_alive $pid]} {
+        incr wait 10
+
+ if {$wait == $max_wait} {
+ puts [colorstr red "Forcing process $pid to crash..."]
+ catch {exec kill -SEGV $pid}
+ } elseif {$wait >= $max_wait * 2} {
+ puts [colorstr red "Forcing process $pid to exit..."]
+ catch {exec kill -KILL $pid}
+ } elseif {$wait % 1000 == 0} {
+ puts "Waiting for process $pid to exit..."
+ }
+ after 10
+ }
+}
+
+proc cleanup {} {
+ puts "Cleaning up..."
+ foreach pid $::pids {
+ puts "killing stale instance $pid"
+ stop_instance $pid
+ }
+ log_crashes
+ if {$::dont_clean} {
+ return
+ }
+ foreach dir $::dirs {
+ catch {exec rm -rf $dir}
+ }
+}
+
+proc abort_sentinel_test msg {
+ incr ::failed
+ puts "WARNING: Aborting the test."
+ puts ">>>>>>>> $msg"
+ if {$::pause_on_error} pause_on_error
+ cleanup
+ exit 1
+}
+
+proc parse_options {} {
+ for {set j 0} {$j < [llength $::argv]} {incr j} {
+ set opt [lindex $::argv $j]
+ set val [lindex $::argv [expr $j+1]]
+ if {$opt eq "--single"} {
+ incr j
+ lappend ::run_matching "*${val}*"
+ } elseif {$opt eq "--pause-on-error"} {
+ set ::pause_on_error 1
+ } elseif {$opt eq {--dont-clean}} {
+ set ::dont_clean 1
+ } elseif {$opt eq "--fail"} {
+ set ::simulate_error 1
+ } elseif {$opt eq {--valgrind}} {
+ set ::valgrind 1
+ } elseif {$opt eq {--host}} {
+ incr j
+ set ::host ${val}
+ } elseif {$opt eq {--tls} || $opt eq {--tls-module}} {
+ package require tls 1.6
+ ::tls::init \
+ -cafile "$::tlsdir/ca.crt" \
+ -certfile "$::tlsdir/client.crt" \
+ -keyfile "$::tlsdir/client.key"
+ set ::tls 1
+ if {$opt eq {--tls-module}} {
+ set ::tls_module 1
+ }
+ } elseif {$opt eq {--config}} {
+ set val2 [lindex $::argv [expr $j+2]]
+ dict set ::global_config $val $val2
+ incr j 2
+ } elseif {$opt eq {--stop}} {
+ set ::stop_on_failure 1
+ } elseif {$opt eq {--loop}} {
+ set ::loop 1
+ } elseif {$opt eq {--log-req-res}} {
+ set ::log_req_res 1
+ } elseif {$opt eq {--force-resp3}} {
+ set ::force_resp3 1
+ } elseif {$opt eq "--help"} {
+ puts "--single <pattern> Only runs tests specified by pattern."
+ puts "--dont-clean Keep log files on exit."
+ puts "--pause-on-error Pause for manual inspection on error."
+ puts "--fail Simulate a test failure."
+ puts "--valgrind Run with valgrind."
+ puts "--tls Run tests in TLS mode."
+ puts "--tls-module Run tests in TLS mode with Redis module."
+ puts "--host <host> Use hostname instead of 127.0.0.1."
+ puts "--config <k> <v> Extra config argument(s)."
+ puts "--stop Blocks once the first test fails."
+ puts "--loop Execute the specified set of tests forever."
+ puts "--help Shows this help."
+ exit 0
+ } else {
+ puts "Unknown option $opt"
+ exit 1
+ }
+ }
+}
+
+# If --pause-on-error option was passed at startup this function is called
+# on error in order to give the developer a chance to understand more about
+# the error condition while the instances are still running.
+proc pause_on_error {} {
+ puts ""
+ puts [colorstr yellow "*** Please inspect the error now ***"]
+ puts "\nType \"continue\" to resume the test, \"help\" for help screen.\n"
+ while 1 {
+ puts -nonewline "> "
+ flush stdout
+ set line [gets stdin]
+ set argv [split $line " "]
+ set cmd [lindex $argv 0]
+ if {$cmd eq {continue}} {
+ break
+ } elseif {$cmd eq {show-redis-logs}} {
+ set count 10
+ if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]}
+ foreach_redis_id id {
+ puts "=== REDIS $id ===="
+ puts [exec tail -$count redis_$id/log.txt]
+ puts "---------------------\n"
+ }
+ } elseif {$cmd eq {show-sentinel-logs}} {
+ set count 10
+ if {[lindex $argv 1] ne {}} {set count [lindex $argv 1]}
+ foreach_sentinel_id id {
+ puts "=== SENTINEL $id ===="
+ puts [exec tail -$count sentinel_$id/log.txt]
+ puts "---------------------\n"
+ }
+ } elseif {$cmd eq {ls}} {
+ foreach_redis_id id {
+ puts -nonewline "Redis $id"
+ set errcode [catch {
+ set str {}
+ append str "@[RI $id tcp_port]: "
+ append str "[RI $id role] "
+ if {[RI $id role] eq {slave}} {
+ append str "[RI $id master_host]:[RI $id master_port]"
+ }
+ set str
+ } retval]
+ if {$errcode} {
+ puts " -- $retval"
+ } else {
+ puts $retval
+ }
+ }
+ foreach_sentinel_id id {
+ puts -nonewline "Sentinel $id"
+ set errcode [catch {
+ set str {}
+ append str "@[SI $id tcp_port]: "
+ append str "[join [S $id sentinel get-master-addr-by-name mymaster]]"
+ set str
+ } retval]
+ if {$errcode} {
+ puts " -- $retval"
+ } else {
+ puts $retval
+ }
+ }
+ } elseif {$cmd eq {help}} {
+ puts "ls List Sentinel and Redis instances."
+ puts "show-sentinel-logs \[N\] Show latest N lines of logs."
+ puts "show-redis-logs \[N\] Show latest N lines of logs."
+ puts "S <id> cmd ... arg Call command in Sentinel <id>."
+ puts "R <id> cmd ... arg Call command in Redis <id>."
+ puts "SI <id> <field> Show Sentinel <id> INFO <field>."
+ puts "RI <id> <field> Show Redis <id> INFO <field>."
+ puts "continue Resume test."
+ } else {
+ set errcode [catch {eval $line} retval]
+ if {$retval ne {}} {puts "$retval"}
+ }
+ }
+}
+
+# We redefine 'test' because for Sentinel we don't use the client-server
+# architecture of the main test suite; everything here runs sequentially.
+proc test {descr code} {
+ set ts [clock format [clock seconds] -format %H:%M:%S]
+ puts -nonewline "$ts> $descr: "
+ flush stdout
+
+ if {[catch {set retval [uplevel 1 $code]} error]} {
+ incr ::failed
+ if {[string match "assertion:*" $error]} {
+ set msg "FAILED: [string range $error 10 end]"
+ puts [colorstr red $msg]
+ if {$::pause_on_error} pause_on_error
+ puts [colorstr red "(Jumping to next unit after error)"]
+ return -code continue
+ } else {
+ # Re-raise, let handler up the stack take care of this.
+ error $error $::errorInfo
+ }
+ } else {
+ puts [colorstr green OK]
+ }
+}
+
+# Check memory leaks when running on OSX using the "leaks" utility.
+proc check_leaks instance_types {
+ if {[string match {*Darwin*} [exec uname -a]]} {
+ puts -nonewline "Testing for memory leaks..."; flush stdout
+ foreach type $instance_types {
+ foreach_instance_id [set ::${type}_instances] id {
+ if {[instance_is_killed $type $id]} continue
+ set pid [get_instance_attrib $type $id pid]
+ set output {0 leaks}
+ catch {exec leaks $pid} output
+ if {[string match {*process does not exist*} $output] ||
+ [string match {*cannot examine*} $output]} {
+ # In a few tests we kill the server process.
+ set output "0 leaks"
+ } else {
+ puts -nonewline "$type/$pid "
+ flush stdout
+ }
+ if {![string match {*0 leaks*} $output]} {
+ puts [colorstr red "=== MEMORY LEAK DETECTED ==="]
+ puts "Instance type $type, ID $id:"
+ puts $output
+ puts "==="
+ incr ::failed
+ }
+ }
+ }
+ puts ""
+ }
+}
+
+# Execute all the units inside the 'tests' directory.
+proc run_tests {} {
+ set tests [lsort [glob ../tests/*]]
+
+while 1 {
+ foreach test $tests {
+ # Remove leaked_fds file before starting
+ if {$::leaked_fds_file != "" && [file exists $::leaked_fds_file]} {
+ file delete $::leaked_fds_file
+ }
+
+ if {[llength $::run_matching] != 0 && ![search_pattern_list $test $::run_matching true]} {
+ continue
+ }
+ if {[file isdirectory $test]} continue
+ puts [colorstr yellow "Testing unit: [lindex [file split $test] end]"]
+ if {[catch { source $test } err]} {
+ puts "FAILED: caught an error in the test $err"
+ puts $::errorInfo
+ incr ::failed
+ # letting the tests resume, so we'll eventually reach the cleanup and report crashes
+
+ if {$::stop_on_failure} {
+ puts -nonewline "(Test stopped, press enter to resume the tests)"
+ flush stdout
+ gets stdin
+ }
+ }
+ check_leaks {redis sentinel}
+
+        # Check if a leaked fds file was created and report it as a failure.
+ if {$::leaked_fds_file != "" && [file exists $::leaked_fds_file]} {
+ puts [colorstr red "ERROR: Sentinel has leaked fds to scripts:"]
+ puts [exec cat $::leaked_fds_file]
+ puts "----"
+ incr ::failed
+ }
+ }
+
+ if {$::loop == 0} { break }
+} ;# while 1
+}
+
+# Print a message and exit with 0 or 1 depending on whether there were failures.
+proc end_tests {} {
+ if {$::failed == 0 } {
+ puts [colorstr green "GOOD! No errors."]
+ exit 0
+ } else {
+ puts [colorstr red "WARNING $::failed test(s) failed."]
+ exit 1
+ }
+}
+
+# The "S" command is used to interact with the N-th Sentinel.
+# The general form is:
+#
+# S <sentinel-id> command arg arg arg ...
+#
+# Example to ping the Sentinel 0 (first instance): S 0 PING
+proc S {n args} {
+ set s [lindex $::sentinel_instances $n]
+ [dict get $s link] {*}$args
+}
+
+# Returns a Redis instance by index.
+# Example:
+# [Rn 0] info
+proc Rn {n} {
+ return [dict get [lindex $::redis_instances $n] link]
+}
+
+# Like S but to chat with Redis instances.
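+# Example to ping the Redis 0 (first instance): R 0 PING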
+proc R {n args} {
+ [Rn $n] {*}$args
+}
+
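+# Extract a single field from an INFO reply, which consists of "field:value"
+# lines. For example, [get_info_field $info role] returns "master" when the
+# reply contains a "role:master" line.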
+proc get_info_field {info field} {
+ set fl [string length $field]
+ append field :
+ foreach line [split $info "\n"] {
+ set line [string trim $line "\r\n "]
+ if {[string range $line 0 $fl] eq $field} {
+ return [string range $line [expr {$fl+1}] end]
+ }
+ }
+ return {}
+}
+
+proc SI {n field} {
+ get_info_field [S $n info] $field
+}
+
+proc RI {n field} {
+ get_info_field [R $n info] $field
+}
+
+proc RPort {n} {
+ if {$::tls} {
+ return [lindex [R $n config get tls-port] 1]
+ } else {
+ return [lindex [R $n config get port] 1]
+ }
+}
+
+# Iterate over IDs of sentinel or redis instances.
+proc foreach_instance_id {instances idvar code} {
+ upvar 1 $idvar id
+ for {set id 0} {$id < [llength $instances]} {incr id} {
+ set errcode [catch {uplevel 1 $code} result]
+ if {$errcode == 1} {
+ error $result $::errorInfo $::errorCode
+ } elseif {$errcode == 4} {
+ continue
+ } elseif {$errcode == 3} {
+ break
+ } elseif {$errcode != 0} {
+ return -code $errcode $result
+ }
+ }
+}
+
+proc foreach_sentinel_id {idvar code} {
+ set errcode [catch {uplevel 1 [list foreach_instance_id $::sentinel_instances $idvar $code]} result]
+ return -code $errcode $result
+}
+
+proc foreach_redis_id {idvar code} {
+ set errcode [catch {uplevel 1 [list foreach_instance_id $::redis_instances $idvar $code]} result]
+ return -code $errcode $result
+}
+
+# Get the specific attribute of the specified instance type, id.
+proc get_instance_attrib {type id attrib} {
+ dict get [lindex [set ::${type}_instances] $id] $attrib
+}
+
+# Set the specific attribute of the specified instance type, id.
+proc set_instance_attrib {type id attrib newval} {
+ set d [lindex [set ::${type}_instances] $id]
+ dict set d $attrib $newval
+ lset ::${type}_instances $id $d
+}
+
+# Create a master-slave cluster of the given number of total instances.
+# The first instance "0" is the master, all others are configured as
+# slaves.
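+#
+# For example, [create_redis_master_slave_cluster 3] makes instance 0 the
+# master of instances 1 and 2, and detaches all the remaining instances.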
+proc create_redis_master_slave_cluster n {
+ foreach_redis_id id {
+ if {$id == 0} {
+ # Our master.
+ R $id slaveof no one
+ R $id flushall
+ } elseif {$id < $n} {
+ R $id slaveof [get_instance_attrib redis 0 host] \
+ [get_instance_attrib redis 0 port]
+ } else {
+ # Instances not part of the cluster.
+ R $id slaveof no one
+ }
+ }
+ # Wait for all the slaves to sync.
+ wait_for_condition 1000 50 {
+ [RI 0 connected_slaves] == ($n-1)
+ } else {
+ fail "Unable to create a master-slaves cluster."
+ }
+}
+
+proc get_instance_id_by_port {type port} {
+ foreach_${type}_id id {
+ if {[get_instance_attrib $type $id port] == $port} {
+ return $id
+ }
+ }
+ fail "Instance $type port $port not found."
+}
+
+# Kill an instance of the specified type/id with SIGKILL.
+# This function will mark the instance PID as -1 to remember that this instance
+# is no longer running and will remove its PID from the list of pids that
+# we kill at cleanup.
+#
+# The instance can be restarted with restart_instance.
+proc kill_instance {type id} {
+ set pid [get_instance_attrib $type $id pid]
+ set port [get_instance_attrib $type $id port]
+
+ if {$pid == -1} {
+ error "You tried to kill $type $id twice."
+ }
+
+ stop_instance $pid
+ set_instance_attrib $type $id pid -1
+ set_instance_attrib $type $id link you_tried_to_talk_with_killed_instance
+
+ # Remove the PID from the list of pids to kill at exit.
+ set ::pids [lsearch -all -inline -not -exact $::pids $pid]
+
+    # Wait for the port it was using to become available again, so that it's
+    # not an issue to start a new server ASAP with the same port.
+ set retry 100
+ while {[incr retry -1]} {
+ set port_is_free [catch {set s [socket 127.0.0.1 $port]}]
+ if {$port_is_free} break
+ catch {close $s}
+ after 100
+ }
+ if {$retry == 0} {
+ error "Port $port does not return available after killing instance."
+ }
+}
+
+# Return true if the instance of the specified type/id is killed.
+proc instance_is_killed {type id} {
+ set pid [get_instance_attrib $type $id pid]
+ expr {$pid == -1}
+}
+
+# Restart an instance previously killed by kill_instance
+proc restart_instance {type id} {
+ set dirname "${type}_${id}"
+ set cfgfile [file join $dirname $type.conf]
+ set port [get_instance_attrib $type $id port]
+
+    # Execute the instance with its old setup and append the new pid
+    # to the list of pids to clean up at exit.
+ set pid [exec_instance $type $dirname $cfgfile]
+ set_instance_attrib $type $id pid $pid
+ lappend ::pids $pid
+
+ # Check that the instance is running
+ if {[server_is_up 127.0.0.1 $port 100] == 0} {
+ set logfile [file join $dirname log.txt]
+ puts [exec tail $logfile]
+ abort_sentinel_test "Problems starting $type #$id: ping timeout, maybe server start failed, check $logfile"
+ }
+
+ # Connect with it with a fresh link
+ set link [redis 127.0.0.1 $port 0 $::tls]
+ $link reconnect 1
+ set_instance_attrib $type $id link $link
+
+ # Make sure the instance is not loading the dataset when this
+ # function returns.
+ while 1 {
+        catch {$link ping} retval
+ if {[string match {*LOADING*} $retval]} {
+ after 100
+ continue
+ } else {
+ break
+ }
+ }
+}
+
+proc redis_deferring_client {type id} {
+ set port [get_instance_attrib $type $id port]
+ set host [get_instance_attrib $type $id host]
+ set client [redis $host $port 1 $::tls]
+ return $client
+}
+
+proc redis_deferring_client_by_addr {host port} {
+ set client [redis $host $port 1 $::tls]
+ return $client
+}
+
+proc redis_client {type id} {
+ set port [get_instance_attrib $type $id port]
+ set host [get_instance_attrib $type $id host]
+ set client [redis $host $port 0 $::tls]
+ return $client
+}
+
+proc redis_client_by_addr {host port} {
+ set client [redis $host $port 0 $::tls]
+ return $client
+}
diff --git a/tests/integration/aof-multi-part.tcl b/tests/integration/aof-multi-part.tcl
new file mode 100644
index 0000000..1d41a8a
--- /dev/null
+++ b/tests/integration/aof-multi-part.tcl
@@ -0,0 +1,1332 @@
+source tests/support/aofmanifest.tcl
+set defaults {appendonly {yes} appendfilename {appendonly.aof} appenddirname {appendonlydir} auto-aof-rewrite-percentage {0}}
+set server_path [tmpdir server.multi.aof]
+set aof_dirname "appendonlydir"
+set aof_basename "appendonly.aof"
+set aof_dirpath "$server_path/$aof_dirname"
+set aof_base1_file "$server_path/$aof_dirname/${aof_basename}.1$::base_aof_sufix$::aof_format_suffix"
+set aof_base2_file "$server_path/$aof_dirname/${aof_basename}.2$::base_aof_sufix$::aof_format_suffix"
+set aof_incr1_file "$server_path/$aof_dirname/${aof_basename}.1$::incr_aof_sufix$::aof_format_suffix"
+set aof_incr2_file "$server_path/$aof_dirname/${aof_basename}.2$::incr_aof_sufix$::aof_format_suffix"
+set aof_incr3_file "$server_path/$aof_dirname/${aof_basename}.3$::incr_aof_sufix$::aof_format_suffix"
+set aof_manifest_file "$server_path/$aof_dirname/${aof_basename}$::manifest_suffix"
+set aof_old_name_old_path "$server_path/$aof_basename"
+set aof_old_name_new_path "$aof_dirpath/$aof_basename"
+set aof_old_name_old_path2 "$server_path/${aof_basename}2"
+set aof_manifest_file2 "$server_path/$aof_dirname/${aof_basename}2$::manifest_suffix"
+
+tags {"external:skip"} {
+
+ # Test Part 1
+
+    # In order to test the loading logic of redis under different combinations
+    # of manifest and AOF files, we manually construct the manifest file and
+    # AOFs, then start redis to verify whether its behavior is as expected.
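+    #
+    # For reference, a manifest is a plain-text file with one line per AOF
+    # file, in the format used throughout the cases below, e.g.:
+    #
+    #   file appendonly.aof.1.base.aof seq 1 type b
+    #   file appendonly.aof.1.incr.aof seq 1 type i
+    #
+    # where "type b" marks the base file, "type i" an incremental file, and
+    # "seq" the per-type sequence number.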
+
+    test {Multi Part AOF can't load data when some file is missing} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr2_file {
+ append_to_aof [formatCommand set k2 v2]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ append_to_manifest "file appendonly.aof.2.incr.aof seq 2 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "appendonly.aof.1.incr.aof .*No such file or directory"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+    test {Multi Part AOF can't load data when the sequence does not increase monotonically} {
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr2_file {
+ append_to_aof [formatCommand set k2 v2]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.2.incr.aof seq 2 type i\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "Found a non-monotonic sequence number"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when there are blank lines in the manifest file} {
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr3_file {
+ append_to_aof [formatCommand set k2 v2]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ append_to_manifest "\n"
+ append_to_manifest "file appendonly.aof.3.incr.aof seq 3 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when there is a duplicate base file} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_base2_file {
+ append_to_aof [formatCommand set k2 v2]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.2.base.aof seq 2 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "Found duplicate base file information"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when the manifest format is wrong (type unknown)} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type x\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "Unknown AOF file type"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when the manifest format is wrong (missing key)} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "filx appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 2 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when the manifest format is wrong (line too short)} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 3 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when the manifest format is wrong (line too long)} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "The AOF manifest file contains too long line"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when the manifest format is wrong (odd parameter)} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i newkey\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 4 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can't load data when the manifest file is empty} {
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "Found an empty AOF manifest"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+    test {Multi Part AOF can start when there is no aof and no manifest} {
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+
+ assert_equal OK [$client set k1 v1]
+ assert_equal v1 [$client get k1]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+    test {Multi Part AOF can start when we have an empty AOF dir} {
+ create_aof_dir $aof_dirpath
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+ }
+ }
+
+    test {Multi Part AOF can load data with a discontinuously increasing sequence} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k2 v2]
+ }
+
+ create_aof $aof_dirpath $aof_incr3_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ append_to_manifest "file appendonly.aof.3.incr.aof seq 3 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal v1 [$client get k1]
+ assert_equal v2 [$client get k2]
+ assert_equal v3 [$client get k3]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+    test {Multi Part AOF can load data when the manifest adds new k-v pairs} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ append_to_aof [formatCommand set k2 v2]
+ }
+
+ create_aof $aof_dirpath $aof_incr3_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b newkey newvalue\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ append_to_manifest "file appendonly.aof.3.incr.aof seq 3 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal v1 [$client get k1]
+ assert_equal v2 [$client get k2]
+ assert_equal v3 [$client get k3]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can load data when some AOFs are empty} {
+ create_aof $aof_dirpath $aof_base1_file {
+ append_to_aof [formatCommand set k1 v1]
+ }
+
+ create_aof $aof_dirpath $aof_incr1_file {
+ }
+
+ create_aof $aof_dirpath $aof_incr3_file {
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ append_to_manifest "file appendonly.aof.3.incr.aof seq 3 type i\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal v1 [$client get k1]
+ assert_equal "" [$client get k2]
+ assert_equal v3 [$client get k3]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can load data from old version redis (rdb preamble no)} {
+ create_aof $server_path $aof_old_name_old_path {
+ append_to_aof [formatCommand set k1 v1]
+ append_to_aof [formatCommand set k2 v2]
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal v1 [$client get k1]
+ assert_equal v2 [$client get k2]
+ assert_equal v3 [$client get k3]
+
+ assert_equal 0 [check_file_exist $server_path $aof_basename]
+ assert_equal 1 [check_file_exist $aof_dirpath $aof_basename]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ assert_equal OK [$client set k4 v4]
+
+ $client bgrewriteaof
+ waitForBgrewriteaof $client
+
+ assert_equal OK [$client set k5 v5]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.2.base.rdb seq 2 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type i}
+ }
+
+ set d1 [$client debug digest]
+ $client debug loadaof
+ set d2 [$client debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can load data from old version redis (rdb preamble yes)} {
+ exec cp tests/assets/rdb-preamble.aof $aof_old_name_old_path
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ # k1 k2 in rdb header and k3 in AOF tail
+ assert_equal v1 [$client get k1]
+ assert_equal v2 [$client get k2]
+ assert_equal v3 [$client get k3]
+
+ assert_equal 0 [check_file_exist $server_path $aof_basename]
+ assert_equal 1 [check_file_exist $aof_dirpath $aof_basename]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ assert_equal OK [$client set k4 v4]
+
+ $client bgrewriteaof
+ waitForBgrewriteaof $client
+
+ assert_equal OK [$client set k5 v5]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.2.base.rdb seq 2 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type i}
+ }
+
+ set d1 [$client debug digest]
+ $client debug loadaof
+ set d2 [$client debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can continue the upgrade from the interrupted upgrade state} {
+ create_aof $server_path $aof_old_name_old_path {
+ append_to_aof [formatCommand set k1 v1]
+ append_to_aof [formatCommand set k2 v2]
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ # Create a layout of an interrupted upgrade (interrupted before the rename).
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof seq 1 type b\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal v1 [$client get k1]
+ assert_equal v2 [$client get k2]
+ assert_equal v3 [$client get k3]
+
+ assert_equal 0 [check_file_exist $server_path $aof_basename]
+ assert_equal 1 [check_file_exist $aof_dirpath $aof_basename]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can be loaded correctly when both server dir and aof dir contain old AOF} {
+ create_aof $server_path $aof_old_name_old_path {
+ append_to_aof [formatCommand set k1 v1]
+ append_to_aof [formatCommand set k2 v2]
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof seq 1 type b\n"
+ }
+
+ create_aof $aof_dirpath $aof_old_name_new_path {
+ append_to_aof [formatCommand set k4 v4]
+ append_to_aof [formatCommand set k5 v5]
+ append_to_aof [formatCommand set k6 v6]
+ }
+
+ start_server_aof [list dir $server_path] {
+ assert_equal 1 [is_alive $srv]
+
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal 0 [$client exists k1]
+ assert_equal 0 [$client exists k2]
+ assert_equal 0 [$client exists k3]
+
+ assert_equal v4 [$client get k4]
+ assert_equal v5 [$client get k5]
+ assert_equal v6 [$client get k6]
+
+ assert_equal 1 [check_file_exist $server_path $aof_basename]
+ assert_equal 1 [check_file_exist $aof_dirpath $aof_basename]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+ }
+
+ clean_aof_persistence $aof_dirpath
+ catch {exec rm -rf $aof_old_name_old_path}
+ }
+
+ test {Multi Part AOF can't load data when the manifest contains the old AOF file name but the file does not exist in server dir and aof dir} {
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof seq 1 type b\n"
+ }
+
+ start_server_aof [list dir $server_path] {
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "AOF loading didn't fail"
+ }
+
+ assert_equal 1 [count_message_lines $server_path/stdout "appendonly.aof .*No such file or directory"]
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+    test {Multi Part AOF can upgrade when two redis instances share the same server dir} {
+ create_aof $server_path $aof_old_name_old_path {
+ append_to_aof [formatCommand set k1 v1]
+ append_to_aof [formatCommand set k2 v2]
+ append_to_aof [formatCommand set k3 v3]
+ }
+
+ create_aof $server_path $aof_old_name_old_path2 {
+ append_to_aof [formatCommand set k4 v4]
+ append_to_aof [formatCommand set k5 v5]
+ append_to_aof [formatCommand set k6 v6]
+ }
+
+ start_server_aof [list dir $server_path] {
+ set redis1 [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+
+ start_server [list overrides [list dir $server_path appendonly yes appendfilename appendonly.aof2]] {
+ set redis2 [redis [srv host] [srv port] 0 $::tls]
+
+ test "Multi Part AOF can upgrade when when two redis share the same server dir (redis1)" {
+ wait_done_loading $redis1
+ assert_equal v1 [$redis1 get k1]
+ assert_equal v2 [$redis1 get k2]
+ assert_equal v3 [$redis1 get k3]
+
+ assert_equal 0 [$redis1 exists k4]
+ assert_equal 0 [$redis1 exists k5]
+ assert_equal 0 [$redis1 exists k6]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ $redis1 bgrewriteaof
+ waitForBgrewriteaof $redis1
+
+ assert_equal OK [$redis1 set k v]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.2.base.rdb seq 2 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type i}
+ }
+
+ set d1 [$redis1 debug digest]
+ $redis1 debug loadaof
+ set d2 [$redis1 debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ test "Multi Part AOF can upgrade when when two redis share the same server dir (redis2)" {
+ wait_done_loading $redis2
+
+ assert_equal 0 [$redis2 exists k1]
+ assert_equal 0 [$redis2 exists k2]
+ assert_equal 0 [$redis2 exists k3]
+
+ assert_equal v4 [$redis2 get k4]
+ assert_equal v5 [$redis2 get k5]
+ assert_equal v6 [$redis2 get k6]
+
+ assert_aof_manifest_content $aof_manifest_file2 {
+ {file appendonly.aof2 seq 1 type b}
+ {file appendonly.aof2.1.incr.aof seq 1 type i}
+ }
+
+ $redis2 bgrewriteaof
+ waitForBgrewriteaof $redis2
+
+ assert_equal OK [$redis2 set k v]
+
+ assert_aof_manifest_content $aof_manifest_file2 {
+ {file appendonly.aof2.2.base.rdb seq 2 type b}
+ {file appendonly.aof2.2.incr.aof seq 2 type i}
+ }
+
+ set d1 [$redis2 debug digest]
+ $redis2 debug loadaof
+ set d2 [$redis2 debug digest]
+ assert {$d1 eq $d2}
+ }
+ }
+ }
+ }
+
+ test {Multi Part AOF can handle an appendfilename that contains whitespace} {
+ start_server [list overrides [list appendonly yes appendfilename "\" file seq \\n\\n.aof \""]] {
+ set dir [get_redis_dir]
+ set aof_manifest_name [format "%s/%s/%s%s" $dir "appendonlydir" " file seq \n\n.aof " $::manifest_suffix]
+ set redis [redis [srv host] [srv port] 0 $::tls]
+
+ assert_equal OK [$redis set k1 v1]
+
+ $redis bgrewriteaof
+ waitForBgrewriteaof $redis
+
+ assert_aof_manifest_content $aof_manifest_name {
+ {file " file seq \n\n.aof .2.base.rdb" seq 2 type b}
+ {file " file seq \n\n.aof .2.incr.aof" seq 2 type i}
+ }
+
+ set d1 [$redis debug digest]
+ $redis debug loadaof
+ set d2 [$redis debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can create BASE (RDB format) when redis starts from empty} {
+ start_server_aof [list dir $server_path] {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.1${::base_aof_sufix}${::rdb_format_suffix}"]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.1.base.rdb seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ $client set foo behavior
+
+ set d1 [$client debug digest]
+ $client debug loadaof
+ set d2 [$client debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ test {Multi Part AOF can create BASE (AOF format) when redis starts from empty} {
+ start_server_aof [list dir $server_path aof-use-rdb-preamble no] {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.1${::base_aof_sufix}${::aof_format_suffix}"]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.1.base.aof seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ $client set foo behavior
+
+ set d1 [$client debug digest]
+ $client debug loadaof
+ set d2 [$client debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ clean_aof_persistence $aof_dirpath
+ }
+
+ # Test Part 2
+ #
+ # Tests whether AOFRW behaves as expected while the redis server is running.
+ # We start redis first, then apply write pressure, enable and disable AOF, and
+ # run bgrewrite and other actions both manually and automatically, to verify
+ # that the correct AOF files are created, that the correct manifest is
+ # generated, and that the data can be reloaded correctly under continuous
+ # write pressure.
+
+
+ start_server {tags {"Multi Part AOF"} overrides {aof-use-rdb-preamble {yes} appendonly {no} save {}}} {
+ set dir [get_redis_dir]
+ set aof_basename "appendonly.aof"
+ set aof_dirname "appendonlydir"
+ set aof_dirpath "$dir/$aof_dirname"
+ set aof_manifest_name "$aof_basename$::manifest_suffix"
+ set aof_manifest_file "$dir/$aof_dirname/$aof_manifest_name"
+
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ catch {exec rm -rf $aof_manifest_file}
+
+ test "Make sure aof manifest $aof_manifest_name not in aof directory" {
+ assert_equal 0 [file exists $aof_manifest_file]
+ }
+
+ test "AOF enable will create manifest file" {
+ r config set appendonly yes ; # Will create manifest and new INCR aof
+ r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite.
+ waitForBgrewriteaof r
+
+ # Start write load
+ set load_handle0 [start_write_load $master_host $master_port 10]
+
+ wait_for_condition 50 100 {
+ [r dbsize] > 0
+ } else {
+ fail "No write load detected."
+ }
+
+ # First AOFRW done
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.1.base.rdb seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ # Check we really have these files
+ assert_equal 1 [check_file_exist $aof_dirpath $aof_manifest_name]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.1${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.1${::incr_aof_sufix}${::aof_format_suffix}"]
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # The second AOFRW done
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.2.base.rdb seq 2 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type i}
+ }
+
+ assert_equal 1 [check_file_exist $aof_dirpath $aof_manifest_name]
+ # Wait for the bio thread to delete the history files
+ wait_for_condition 1000 10 {
+ [check_file_exist $aof_dirpath "${aof_basename}.1${::base_aof_sufix}${::rdb_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.1${::incr_aof_sufix}${::aof_format_suffix}"] == 0
+ } else {
+ fail "Failed to delete history AOF"
+ }
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.2${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.2${::incr_aof_sufix}${::aof_format_suffix}"]
+
+ stop_write_load $load_handle0
+ wait_load_handlers_disconnected
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ test "AOF multiple rewrite failures will open multiple INCR AOFs" {
+ # Start write load
+ r config set rdb-key-save-delay 10000000
+
+ set orig_size [r dbsize]
+ set load_handle0 [start_write_load $master_host $master_port 10]
+
+ wait_for_condition 50 100 {
+ [r dbsize] > $orig_size
+ } else {
+ fail "No write load detected."
+ }
+
+ # Let AOFRW fail three times
+ r bgrewriteaof
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+ waitForBgrewriteaof r
+
+ r bgrewriteaof
+ set pid2 [get_child_pid 0]
+ catch {exec kill -9 $pid2}
+ waitForBgrewriteaof r
+
+ r bgrewriteaof
+ set pid3 [get_child_pid 0]
+ catch {exec kill -9 $pid3}
+ waitForBgrewriteaof r
+
+ assert_equal 0 [check_file_exist $dir "temp-rewriteaof-bg-$pid1.aof"]
+ assert_equal 0 [check_file_exist $dir "temp-rewriteaof-bg-$pid2.aof"]
+ assert_equal 0 [check_file_exist $dir "temp-rewriteaof-bg-$pid3.aof"]
+
+ # We will have four INCR AOFs
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.2.base.rdb seq 2 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type i}
+ {file appendonly.aof.3.incr.aof seq 3 type i}
+ {file appendonly.aof.4.incr.aof seq 4 type i}
+ {file appendonly.aof.5.incr.aof seq 5 type i}
+ }
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.2${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.2${::incr_aof_sufix}${::aof_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.3${::incr_aof_sufix}${::aof_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.4${::incr_aof_sufix}${::aof_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.5${::incr_aof_sufix}${::aof_format_suffix}"]
+
+ stop_write_load $load_handle0
+ wait_load_handlers_disconnected
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+ wait_for_condition 1000 10 {
+ [s rdb_bgsave_in_progress] eq 0
+ } else {
+ fail "bgsave did not stop in time"
+ }
+
+ # AOFRW success
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # All previous INCR AOFs have become history
+ # and will be deleted
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.3.base.rdb seq 3 type b}
+ {file appendonly.aof.6.incr.aof seq 6 type i}
+ }
+
+ # Wait for the bio thread to delete the history files
+ wait_for_condition 1000 10 {
+ [check_file_exist $aof_dirpath "${aof_basename}.2${::base_aof_sufix}${::rdb_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.2${::incr_aof_sufix}${::aof_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.3${::incr_aof_sufix}${::aof_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.4${::incr_aof_sufix}${::aof_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.5${::incr_aof_sufix}${::aof_format_suffix}"] == 0
+ } else {
+ fail "Failed to delete history AOF"
+ }
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.3${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.6${::incr_aof_sufix}${::aof_format_suffix}"]
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ test "AOF rewrite doesn't open new aof when AOF turn off" {
+ r config set appendonly no
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # We only have BASE AOF, no INCR AOF
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.4.base.rdb seq 4 type b}
+ }
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.4${::base_aof_sufix}${::rdb_format_suffix}"]
+ wait_for_condition 1000 10 {
+ [check_file_exist $aof_dirpath "${aof_basename}.6${::incr_aof_sufix}${::aof_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.7${::incr_aof_sufix}${::aof_format_suffix}"] == 0
+ } else {
+ fail "Failed to delete history AOF"
+ }
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+
+ # Turn on AOF again
+ r config set appendonly yes
+ waitForBgrewriteaof r
+
+ # A new INCR AOF was created
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.5.base.rdb seq 5 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ # Wait for the bio thread to delete the history files
+ wait_for_condition 1000 10 {
+ [check_file_exist $aof_dirpath "${aof_basename}.4${::base_aof_sufix}${::rdb_format_suffix}"] == 0
+ } else {
+ fail "Failed to delete history AOF"
+ }
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.5${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.1${::incr_aof_sufix}${::aof_format_suffix}"]
+ }
+
+ test "AOF enable/disable auto gc" {
+ r config set aof-disable-auto-gc yes
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # We can see four history AOFs (evolved from two BASE and two INCR files)
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.7.base.rdb seq 7 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type h}
+ {file appendonly.aof.6.base.rdb seq 6 type h}
+ {file appendonly.aof.1.incr.aof seq 1 type h}
+ {file appendonly.aof.5.base.rdb seq 5 type h}
+ {file appendonly.aof.3.incr.aof seq 3 type i}
+ }
+
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.5${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.6${::base_aof_sufix}${::rdb_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.1${::incr_aof_sufix}${::aof_format_suffix}"]
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.2${::incr_aof_sufix}${::aof_format_suffix}"]
+
+ r config set aof-disable-auto-gc no
+
+ # Auto GC succeeded
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.7.base.rdb seq 7 type b}
+ {file appendonly.aof.3.incr.aof seq 3 type i}
+ }
+
+ # Wait for the bio thread to delete the history files
+ wait_for_condition 1000 10 {
+ [check_file_exist $aof_dirpath "${aof_basename}.5${::base_aof_sufix}${::rdb_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.6${::base_aof_sufix}${::rdb_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.1${::incr_aof_sufix}${::aof_format_suffix}"] == 0 &&
+ [check_file_exist $aof_dirpath "${aof_basename}.2${::incr_aof_sufix}${::aof_format_suffix}"] == 0
+ } else {
+ fail "Failed to delete history AOF"
+ }
+ }
+
+ test "AOF can produce consecutive sequence number after reload" {
+ # Current manifest, BASE seq 7 and INCR seq 3
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.7.base.rdb seq 7 type b}
+ {file appendonly.aof.3.incr.aof seq 3 type i}
+ }
+
+ r debug loadaof
+
+ # Trigger AOFRW
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # Now BASE seq is 8 and INCR seq is 4
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.8.base.rdb seq 8 type b}
+ {file appendonly.aof.4.incr.aof seq 4 type i}
+ }
+ }
+
+ test "AOF enable during BGSAVE will not write data util AOFRW finish" {
+ r config set appendonly no
+ r config set save ""
+ r config set rdb-key-save-delay 10000000
+
+ r set k1 v1
+ r bgsave
+
+ wait_for_condition 1000 10 {
+ [s rdb_bgsave_in_progress] eq 1
+ } else {
+ fail "bgsave did not start in time"
+ }
+
+ # Make server.aof_rewrite_scheduled = 1
+ r config set appendonly yes
+ assert_equal [s aof_rewrite_scheduled] 1
+
+ # No new INCR AOF is opened
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.8.base.rdb seq 8 type b}
+ {file appendonly.aof.4.incr.aof seq 4 type i}
+ }
+
+ r set k2 v2
+ r debug loadaof
+
+ # Both k1 and k2 are lost
+ assert_equal 0 [r exists k1]
+ assert_equal 0 [r exists k2]
+
+ set total_forks [s total_forks]
+ assert_equal [s rdb_bgsave_in_progress] 1
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+ wait_for_condition 1000 10 {
+ [s rdb_bgsave_in_progress] eq 0
+ } else {
+ fail "bgsave did not stop in time"
+ }
+
+ # Make sure AOFRW was scheduled
+ wait_for_condition 1000 10 {
+ [s total_forks] == [expr $total_forks + 1]
+ } else {
+ fail "aof rewrite did not scheduled"
+ }
+ waitForBgrewriteaof r
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.9.base.rdb seq 9 type b}
+ {file appendonly.aof.5.incr.aof seq 5 type i}
+ }
+
+ r set k3 v3
+ r debug loadaof
+ assert_equal v3 [r get k3]
+ }
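+
+ # Why k1 and k2 are lost above (a reading of the test, not a spec): while
+ # the BGSAVE child is running, enabling AOF only schedules the rewrite
+ # (server.aof_rewrite_scheduled), so no new INCR AOF exists yet, and writes
+ # made in that window are not persisted until the scheduled AOFRW runs.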
+
+ test "AOF will trigger limit when AOFRW fails many times" {
+ # Clear all data and trigger a successful AOFRW, so we can let
+ # server.aof_current_size equal to 0
+ r flushall
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ r config set rdb-key-save-delay 10000000
+ # Make it easy to trigger AOFRW
+ r config set auto-aof-rewrite-percentage 1
+ r config set auto-aof-rewrite-min-size 1kb
+
+ # Set a key so that AOFRW can be delayed
+ r set k v
+
+ # Let AOFRW fail 3 times; this will trigger the AOFRW limit
+ r bgrewriteaof
+ catch {exec kill -9 [get_child_pid 0]}
+ waitForBgrewriteaof r
+
+ r bgrewriteaof
+ catch {exec kill -9 [get_child_pid 0]}
+ waitForBgrewriteaof r
+
+ r bgrewriteaof
+ catch {exec kill -9 [get_child_pid 0]}
+ waitForBgrewriteaof r
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.10.base.rdb seq 10 type b}
+ {file appendonly.aof.6.incr.aof seq 6 type i}
+ {file appendonly.aof.7.incr.aof seq 7 type i}
+ {file appendonly.aof.8.incr.aof seq 8 type i}
+ {file appendonly.aof.9.incr.aof seq 9 type i}
+ }
+
+ # Write 1KB data to trigger AOFRW
+ r set x [string repeat x 1024]
+
+ # Make sure the limit log message appears
+ wait_for_condition 1000 50 {
+ [count_log_message 0 "triggered the limit"] == 1
+ } else {
+ fail "aof rewrite did not trigger limit"
+ }
+ assert_equal [status r aof_rewrite_in_progress] 0
+
+ # No new INCR AOF is created
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.10.base.rdb seq 10 type b}
+ {file appendonly.aof.6.incr.aof seq 6 type i}
+ {file appendonly.aof.7.incr.aof seq 7 type i}
+ {file appendonly.aof.8.incr.aof seq 8 type i}
+ {file appendonly.aof.9.incr.aof seq 9 type i}
+ }
+
+ # Turn off auto rewrite
+ r config set auto-aof-rewrite-percentage 0
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+ wait_for_condition 1000 10 {
+ [s aof_rewrite_in_progress] eq 0
+ } else {
+ fail "aof rewrite did not stop in time"
+ }
+
+ # We can still manually execute AOFRW immediately
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # A new INCR AOF can be created
+ assert_equal 1 [check_file_exist $aof_dirpath "${aof_basename}.10${::incr_aof_sufix}${::aof_format_suffix}"]
+
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.11.base.rdb seq 11 type b}
+ {file appendonly.aof.10.incr.aof seq 10 type i}
+ }
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+ }
+
+ start_server {overrides {aof-use-rdb-preamble {yes} appendonly {no} save {}}} {
+ set dir [get_redis_dir]
+ set aof_basename "appendonly.aof"
+ set aof_dirname "appendonlydir"
+ set aof_dirpath "$dir/$aof_dirname"
+ set aof_manifest_name "$aof_basename$::manifest_suffix"
+ set aof_manifest_file "$dir/$aof_dirname/$aof_manifest_name"
+
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ test "AOF will open a temporary INCR AOF to accumulate data until the first AOFRW success when AOF is dynamically enabled" {
+ r config set save ""
+ # Increase AOFRW execution time to give us enough time to kill it
+ r config set rdb-key-save-delay 10000000
+
+ # Start write load
+ set load_handle0 [start_write_load $master_host $master_port 10]
+
+ wait_for_condition 50 100 {
+ [r dbsize] > 0
+ } else {
+ fail "No write load detected."
+ }
+
+ # Enabling AOF will trigger an initial AOFRW
+ r config set appendonly yes
+ # Let AOFRW fail
+ assert_equal 1 [s aof_rewrite_in_progress]
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+
+ # Wait for AOFRW to exit and delete temp incr aof
+ wait_for_condition 1000 100 {
+ [count_log_message 0 "Removing the temp incr aof file"] == 1
+ } else {
+ fail "temp aof did not delete"
+ }
+
+ # Make sure manifest file is not created
+ assert_equal 0 [check_file_exist $aof_dirpath $aof_manifest_name]
+ # Make sure BASE AOF is not created
+ assert_equal 0 [check_file_exist $aof_dirpath "${aof_basename}.1${::base_aof_sufix}${::rdb_format_suffix}"]
+
+ # Make sure the next AOFRW has started
+ wait_for_condition 1000 50 {
+ [s aof_rewrite_in_progress] == 1
+ } else {
+ fail "aof rewrite did not scheduled"
+ }
+
+ # Do a successful AOFRW
+ set total_forks [s total_forks]
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+
+ # Make sure the next AOFRW has started
+ wait_for_condition 1000 10 {
+ [s total_forks] == [expr $total_forks + 1]
+ } else {
+ fail "aof rewrite did not scheduled"
+ }
+ waitForBgrewriteaof r
+
+ assert_equal 2 [count_log_message 0 "Removing the temp incr aof file"]
+
+ # BASE and INCR AOF are successfully created
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.1.base.rdb seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ stop_write_load $load_handle0
+ wait_load_handlers_disconnected
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+
+ # Dynamically disable AOF again
+ r config set appendonly no
+
+ # Disabling AOF does not delete previous AOF files
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+
+ assert_equal 0 [s rdb_changes_since_last_save]
+ r config set rdb-key-save-delay 10000000
+ set load_handle0 [start_write_load $master_host $master_port 10]
+ wait_for_condition 50 100 {
+ [s rdb_changes_since_last_save] > 0
+ } else {
+ fail "No write load detected."
+ }
+
+ # Re-enable AOF
+ r config set appendonly yes
+
+ # Let AOFRW fail
+ assert_equal 1 [s aof_rewrite_in_progress]
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+
+ # Wait for AOFRW to exit and delete temp incr aof
+ wait_for_condition 1000 100 {
+ [count_log_message 0 "Removing the temp incr aof file"] == 3
+ } else {
+ fail "temp aof did not delete 3 times"
+ }
+
+ # Make sure no new incr AOF was created
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.1.base.rdb seq 1 type b}
+ {file appendonly.aof.1.incr.aof seq 1 type i}
+ }
+
+ # Make sure the next AOFRW has started
+ wait_for_condition 1000 50 {
+ [s aof_rewrite_in_progress] == 1
+ } else {
+ fail "aof rewrite did not scheduled"
+ }
+
+ # Do a successful AOFRW
+ set total_forks [s total_forks]
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+
+ wait_for_condition 1000 10 {
+ [s total_forks] == [expr $total_forks + 1]
+ } else {
+ fail "aof rewrite did not scheduled"
+ }
+ waitForBgrewriteaof r
+
+ assert_equal 4 [count_log_message 0 "Removing the temp incr aof file"]
+
+ # New BASE and INCR AOF are successfully created
+ assert_aof_manifest_content $aof_manifest_file {
+ {file appendonly.aof.2.base.rdb seq 2 type b}
+ {file appendonly.aof.2.incr.aof seq 2 type i}
+ }
+
+ stop_write_load $load_handle0
+ wait_load_handlers_disconnected
+
+ set d1 [r debug digest]
+ r debug loadaof
+ set d2 [r debug digest]
+ assert {$d1 eq $d2}
+ }
+ }
+ }
+}
diff --git a/tests/integration/aof-race.tcl b/tests/integration/aof-race.tcl
new file mode 100644
index 0000000..32f3a74
--- /dev/null
+++ b/tests/integration/aof-race.tcl
@@ -0,0 +1,37 @@
+source tests/support/aofmanifest.tcl
+set defaults { appendonly {yes} appendfilename {appendonly.aof} appenddirname {appendonlydir} aof-use-rdb-preamble {no} }
+set server_path [tmpdir server.aof]
+
+tags {"aof external:skip"} {
+ # Specific test for a regression where internal buffers were not properly
+ # cleaned after a child responsible for an AOF rewrite exited. This buffer
+ # was subsequently appended to the new AOF, resulting in duplicate commands.
+ start_server_aof [list dir $server_path] {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ set bench [open "|src/redis-benchmark -q -s [dict get $srv unixsocket] -c 20 -n 20000 incr foo" "r+"]
+
+ wait_for_condition 100 1 {
+ [$client get foo] > 0
+ } else {
+ # Don't care if it fails.
+ }
+
+ # Benchmark should be running by now: start background rewrite
+ $client bgrewriteaof
+
+ # Read until benchmark pipe reaches EOF
+ while {[string length [read $bench]] > 0} {}
+
+ waitForBgrewriteaof $client
+
+ # Check contents of foo
+ assert_equal 20000 [$client get foo]
+ }
+
+ # Restart server to replay AOF
+ start_server_aof [list dir $server_path] {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert_equal 20000 [$client get foo]
+ }
+}
diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl
new file mode 100644
index 0000000..1f73fc3
--- /dev/null
+++ b/tests/integration/aof.tcl
@@ -0,0 +1,681 @@
+source tests/support/aofmanifest.tcl
+set defaults { appendonly {yes} appendfilename {appendonly.aof} appenddirname {appendonlydir} auto-aof-rewrite-percentage {0}}
+set server_path [tmpdir server.aof]
+set aof_dirname "appendonlydir"
+set aof_basename "appendonly.aof"
+set aof_dirpath "$server_path/$aof_dirname"
+set aof_base_file "$server_path/$aof_dirname/${aof_basename}.1$::base_aof_sufix$::aof_format_suffix"
+set aof_file "$server_path/$aof_dirname/${aof_basename}.1$::incr_aof_sufix$::aof_format_suffix"
+set aof_manifest_file "$server_path/$aof_dirname/$aof_basename$::manifest_suffix"
+
+tags {"aof external:skip"} {
+ # Server can start when aof-load-truncated is set to yes and AOF
+ # is truncated, with an incomplete MULTI block.
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand multi]
+ append_to_aof [formatCommand set bar world]
+ }
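+
+ # As an aside: formatCommand (from the shared test helpers) serializes a
+ # command into the RESP wire format that AOF files contain. Assuming the
+ # usual helper behavior, [formatCommand set foo hello] yields:
+ #   *3\r\n$3\r\nset\r\n$3\r\nfoo\r\n$5\r\nhello\r\n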
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated yes] {
+ test "Unfinished MULTI: Server should start if load-truncated is yes" {
+ assert_equal 1 [is_alive $srv]
+ }
+ }
+
+ ## The server should also start with a truncated AOF that has no incomplete MULTI block.
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand incr foo]
+ append_to_aof [formatCommand incr foo]
+ append_to_aof [formatCommand incr foo]
+ append_to_aof [formatCommand incr foo]
+ append_to_aof [formatCommand incr foo]
+ append_to_aof [string range [formatCommand incr foo] 0 end-1]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated yes] {
+ test "Short read: Server should start if load-truncated is yes" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "Truncated AOF loaded: we expect foo to be equal to 5" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert {[$client get foo] eq "5"}
+ }
+
+ test "Append a new command after loading an incomplete AOF" {
+ $client incr foo
+ }
+ }
+
+ # Now the AOF file is expected to be correct
+ start_server_aof [list dir $server_path aof-load-truncated yes] {
+ test "Short read + command: Server should start" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "Truncated AOF loaded: we expect foo to be equal to 6 now" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert {[$client get foo] eq "6"}
+ }
+ }
+
+ ## Test that the server exits when the AOF contains a format error
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof "!!!"
+ append_to_aof [formatCommand set foo hello]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated yes] {
+ test "Bad format: Server should have logged an error" {
+ set pattern "*Bad file format reading the append only file*"
+ set retry 10
+ while {$retry} {
+ set result [exec tail -1 < [dict get $srv stdout]]
+ if {[string match $pattern $result]} {
+ break
+ }
+ incr retry -1
+ after 1000
+ }
+ if {$retry == 0} {
+ error "assertion:expected error not found on config file"
+ }
+ }
+ }
+
+ ## Test the server doesn't start when the AOF contains an unfinished MULTI
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand multi]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "Unfinished MULTI: Server should have logged an error" {
+ set pattern "*Unexpected end of file reading the append only file*"
+ set retry 10
+ while {$retry} {
+ set result [exec tail -1 < [dict get $srv stdout]]
+ if {[string match $pattern $result]} {
+ break
+ }
+ incr retry -1
+ after 1000
+ }
+ if {$retry == 0} {
+ error "assertion:expected error not found on config file"
+ }
+ }
+ }
+
+ ## Test that the server exits when the AOF contains a short read
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [string range [formatCommand set bar world] 0 end-1]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "Short read: Server should have logged an error" {
+ set pattern "*Unexpected end of file reading the append only file*"
+ set retry 10
+ while {$retry} {
+ set result [exec tail -1 < [dict get $srv stdout]]
+ if {[string match $pattern $result]} {
+ break
+ }
+ incr retry -1
+ after 1000
+ }
+ if {$retry == 0} {
+ error "assertion:expected error not found on config file"
+ }
+ }
+ }
+
+ ## Test that redis-check-aof indeed sees that this AOF is not valid
+ test "Short read: Utility should confirm the AOF is not valid" {
+ catch {
+ exec src/redis-check-aof $aof_manifest_file
+ } result
+ assert_match "*not valid*" $result
+ }
+
+ test "Short read: Utility should show the abnormal line num in AOF" {
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof "!!!"
+ }
+
+ catch {
+ exec src/redis-check-aof $aof_manifest_file
+ } result
+ assert_match "*ok_up_to_line=8*" $result
+ }
+
+ test "Short read: Utility should be able to fix the AOF" {
+ set result [exec src/redis-check-aof --fix $aof_manifest_file << "y\n"]
+ assert_match "*Successfully truncated AOF*" $result
+ }
+
+ ## Test that the server can be started using the truncated AOF
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "Fixed AOF: Server should have been started" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "Fixed AOF: Keyspace should contain values that were parseable" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert_equal "hello" [$client get foo]
+ assert_equal "" [$client get bar]
+ }
+ }
+
+ ## Test that SPOP (which modifies the client's argc/argv) is correctly freed
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand sadd set foo]
+ append_to_aof [formatCommand sadd set bar]
+ append_to_aof [formatCommand spop set]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "AOF+SPOP: Server should have been started" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "AOF+SPOP: Set should have 1 member" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert_equal 1 [$client scard set]
+ }
+ }
+
+ ## Uses the alsoPropagate() API.
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand sadd set foo]
+ append_to_aof [formatCommand sadd set bar]
+ append_to_aof [formatCommand sadd set gah]
+ append_to_aof [formatCommand spop set 2]
+ }
+
+ start_server_aof [list dir $server_path] {
+ test "AOF+SPOP: Server should have been started" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "AOF+SPOP: Set should have 1 member" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert_equal 1 [$client scard set]
+ }
+ }
+
+ ## Test that PEXPIREAT is loaded correctly
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand rpush list foo]
+ append_to_aof [formatCommand pexpireat list 1000]
+ append_to_aof [formatCommand rpush list bar]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "AOF+EXPIRE: Server should have been started" {
+ assert_equal 1 [is_alive $srv]
+ }
+
+ test "AOF+EXPIRE: List should be empty" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+ assert_equal 0 [$client llen list]
+ }
+ }
+
+ start_server {overrides {appendonly {yes}}} {
+ test {Redis should not try to convert DEL into EXPIREAT for EXPIRE -1} {
+ r set x 10
+ r expire x -1
+ }
+ }
+
+ start_server {overrides {appendonly {yes} appendfsync always}} {
+ test {AOF fsync always barrier issue} {
+ set rd [redis_deferring_client]
+ # Set a sleep when the aof is flushed, so that we have a chance to
+ # look at the aof size and detect if the response of an incr command
+ # arrives before the data was written (and hopefully fsynced).
+ # We create a big reply, which will hopefully not fit in the socket
+ # buffers and will install a write handler; then we sleep a bit and
+ # issue the incr command, hoping that the last portion of the output
+ # buffer write and the processing of the incr will happen in the same
+ # event loop cycle.
+ # Since the socket buffers and timing are unpredictable, we fuzz this
+ # test with slightly different sizes and sleeps a few times.
+ for {set i 0} {$i < 10} {incr i} {
+ r debug aof-flush-sleep 0
+ r del x
+ r setrange x [expr {int(rand()*5000000)+10000000}] x
+ r debug aof-flush-sleep 500000
+ set aof [get_last_incr_aof_path r]
+ set size1 [file size $aof]
+ $rd get x
+ after [expr {int(rand()*30)}]
+ $rd incr new_value
+ $rd read
+ $rd read
+ set size2 [file size $aof]
+ assert {$size1 != $size2}
+ }
+ }
+ }
+
+ start_server {overrides {appendonly {yes}}} {
+ test {GETEX should not append to AOF} {
+ set aof [get_last_incr_aof_path r]
+ r set foo bar
+ set before [file size $aof]
+ r getex foo
+ set after [file size $aof]
+ assert_equal $before $after
+ }
+ }
+
+ ## Test that the server exits when the AOF contains an unknown command
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand bla foo hello]
+ append_to_aof [formatCommand set foo hello]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated yes] {
+ test "Unknown command: Server should have logged an error" {
+ set pattern "*Unknown command 'bla' reading the append only file*"
+ set retry 10
+ while {$retry} {
+ set result [exec tail -1 < [dict get $srv stdout]]
+ if {[string match $pattern $result]} {
+ break
+ }
+ incr retry -1
+ after 1000
+ }
+ if {$retry == 0} {
+ error "assertion:expected error not found on config file"
+ }
+ }
+ }
+
+ # Test that LMPOP/BLMPOP work fine with AOF.
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand lpush mylist a b c]
+ append_to_aof [formatCommand rpush mylist2 1 2 3]
+ append_to_aof [formatCommand lpush mylist3 a b c d e]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "AOF+LMPOP/BLMPOP: pop elements from the list" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ set client2 [redis [dict get $srv host] [dict get $srv port] 1 $::tls]
+ wait_done_loading $client
+
+ # Pop all elements from mylist; the BLMPOP should delete mylist.
+ $client lmpop 1 mylist left count 1
+ $client blmpop 0 1 mylist left count 10
+
+ # Pop all elements from mylist2; the LMPOP should delete mylist2.
+ $client blmpop 0 2 mylist mylist2 right count 10
+ $client lmpop 2 mylist mylist2 right count 2
+
+ # Blocking path: the client blocks and is then released.
+ $client2 blmpop 0 2 mylist mylist2 left count 2
+ after 100
+ $client lpush mylist2 a b c
+
+ # Pop the last element from mylist2
+ $client blmpop 0 3 mylist mylist2 mylist3 left count 1
+
+ # Leave two elements in mylist3.
+ $client blmpop 0 3 mylist mylist2 mylist3 right count 3
+ }
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "AOF+LMPOP/BLMPOP: after pop elements from the list" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ # mylist and mylist2 no longer exist.
+ assert_equal 0 [$client exists mylist mylist2]
+
+ # Length of mylist3 is two.
+ assert_equal 2 [$client llen mylist3]
+ }
+ }
+
+ # Test that ZMPOP/BZMPOP work fine with AOF.
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand zadd myzset 1 one 2 two 3 three]
+ append_to_aof [formatCommand zadd myzset2 4 four 5 five 6 six]
+ append_to_aof [formatCommand zadd myzset3 1 one 2 two 3 three 4 four 5 five]
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "AOF+ZMPOP/BZMPOP: pop elements from the zset" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ set client2 [redis [dict get $srv host] [dict get $srv port] 1 $::tls]
+ wait_done_loading $client
+
+ # Pop all elements from myzset; the BZMPOP should delete myzset.
+ $client zmpop 1 myzset min count 1
+ $client bzmpop 0 1 myzset min count 10
+
+ # Pop all elements from myzset2; the ZMPOP should delete myzset2.
+ $client bzmpop 0 2 myzset myzset2 max count 10
+ $client zmpop 2 myzset myzset2 max count 2
+
+ # Blocking path: the client blocks and is then released.
+ $client2 bzmpop 0 2 myzset myzset2 min count 2
+ after 100
+ $client zadd myzset2 1 one 2 two 3 three
+
+ # Pop the last element from myzset2
+ $client bzmpop 0 3 myzset myzset2 myzset3 min count 1
+
+ # Leave two elements in myzset3.
+ $client bzmpop 0 3 myzset myzset2 myzset3 max count 3
+ }
+ }
+
+ start_server_aof [list dir $server_path aof-load-truncated no] {
+ test "AOF+ZMPOP/BZMPOP: after pop elements from the zset" {
+ set client [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $client
+
+ # myzset and myzset2 no longer exist.
+ assert_equal 0 [$client exists myzset myzset2]
+
+ # Length of myzset3 is two.
+ assert_equal 2 [$client zcard myzset3]
+ }
+ }
+
+ test {Generate timestamp annotations in AOF} {
+ start_server {overrides {appendonly {yes}}} {
+ r config set aof-timestamp-enabled yes
+ r config set aof-use-rdb-preamble no
+ set aof [get_last_incr_aof_path r]
+
+ r set foo bar
+ assert_match "#TS:*" [exec head -n 1 $aof]
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ set aof [get_base_aof_path r]
+ assert_match "#TS:*" [exec head -n 1 $aof]
+ }
+ }
+
+ # Redis can load an AOF that contains timestamp annotations
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof "#TS:1628217470\r\n"
+ append_to_aof [formatCommand set foo1 bar1]
+ append_to_aof "#TS:1628217471\r\n"
+ append_to_aof [formatCommand set foo2 bar2]
+ append_to_aof "#TS:1628217472\r\n"
+ append_to_aof "#TS:1628217473\r\n"
+ append_to_aof [formatCommand set foo3 bar3]
+ append_to_aof "#TS:1628217474\r\n"
+ }
+ start_server_aof [list dir $server_path] {
+ test {Successfully load AOF which has timestamp annotations inside} {
+ set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $c
+ assert_equal "bar1" [$c get foo1]
+ assert_equal "bar2" [$c get foo2]
+ assert_equal "bar3" [$c get foo3]
+ }
+ }
+
+ test {Truncate AOF to specific timestamp} {
+ # truncate to timestamp 1628217473
+ exec src/redis-check-aof --truncate-to-timestamp 1628217473 $aof_manifest_file
+ start_server_aof [list dir $server_path] {
+ set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $c
+ assert_equal "bar1" [$c get foo1]
+ assert_equal "bar2" [$c get foo2]
+ assert_equal "bar3" [$c get foo3]
+ }
+
+ # truncate to timestamp 1628217471
+ exec src/redis-check-aof --truncate-to-timestamp 1628217471 $aof_manifest_file
+ start_server_aof [list dir $server_path] {
+ set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $c
+ assert_equal "bar1" [$c get foo1]
+ assert_equal "bar2" [$c get foo2]
+ assert_equal "" [$c get foo3]
+ }
+
+ # truncate to timestamp 1628217470
+ exec src/redis-check-aof --truncate-to-timestamp 1628217470 $aof_manifest_file
+ start_server_aof [list dir $server_path] {
+ set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls]
+ wait_done_loading $c
+ assert_equal "bar1" [$c get foo1]
+ assert_equal "" [$c get foo2]
+ }
+
+ # truncate to timestamp 1628217469
+ catch {exec src/redis-check-aof --truncate-to-timestamp 1628217469 $aof_manifest_file} e
+ assert_match {*aborting*} $e
+ }
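+
+ # A note on the semantics exercised above (a summary of the test, not a
+ # spec): each "#TS:<unix-seconds>" annotation timestamps the commands that
+ # follow it, and --truncate-to-timestamp keeps every command whose most
+ # recent preceding annotation is <= the target. Truncating to a timestamp
+ # older than the first annotation would drop everything, so the tool aborts.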
+
+ test {EVAL timeout with slow verbatim Lua script from AOF} {
+ start_server [list overrides [list dir $server_path appendonly yes lua-time-limit 1 aof-use-rdb-preamble no]] {
+ # Generate a long-running script that is propagated to the AOF as a
+ # script, and make sure that the script times out during loading.
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand select 9]
+ append_to_aof [formatCommand eval {redis.call('set',KEYS[1],'y'); for i=1,1500000 do redis.call('ping') end return 'ok'} 1 x]
+ }
+ set rd [redis_deferring_client]
+ $rd debug loadaof
+ $rd flush
+ wait_for_condition 100 10 {
+ [catch {r ping} e] == 1
+ } else {
+ fail "server didn't start loading"
+ }
+ assert_error {LOADING*} {r ping}
+ $rd read
+ $rd close
+ wait_for_log_messages 0 {"*Slow script detected*"} 0 100 100
+ assert_equal [r get x] y
+ }
+ }
+
+ test {EVAL can process writes from AOF in read-only replicas} {
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand select 9]
+ append_to_aof [formatCommand eval {redis.call("set",KEYS[1],"100")} 1 foo]
+ append_to_aof [formatCommand eval {redis.call("incr",KEYS[1])} 1 foo]
+ append_to_aof [formatCommand eval {redis.call("incr",KEYS[1])} 1 foo]
+ }
+ start_server [list overrides [list dir $server_path appendonly yes replica-read-only yes replicaof "127.0.0.1 0"]] {
+ assert_equal [r get foo] 102
+ }
+ }
+
+ test {Test redis-check-aof for old style resp AOF} {
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ catch {
+ exec src/redis-check-aof $aof_file
+ } result
+ assert_match "*Start checking Old-Style AOF*is valid*" $result
+ }
+
+ test {Test redis-check-aof for old style rdb-preamble AOF} {
+ catch {
+ exec src/redis-check-aof tests/assets/rdb-preamble.aof
+ } result
+ assert_match "*Start checking Old-Style AOF*RDB preamble is OK, proceeding with AOF tail*is valid*" $result
+ }
+
+ test {Test redis-check-aof for Multi Part AOF with resp AOF base} {
+ create_aof $aof_dirpath $aof_base_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ catch {
+ exec src/redis-check-aof $aof_manifest_file
+ } result
+ assert_match "*Start checking Multi Part AOF*Start to check BASE AOF (RESP format)*BASE AOF*is valid*Start to check INCR files*INCR AOF*is valid*All AOF files and manifest are valid*" $result
+ }
+
+ test {Test redis-check-aof for Multi Part AOF with rdb-preamble AOF base} {
+ exec cp tests/assets/rdb-preamble.aof $aof_base_file
+
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ catch {
+ exec src/redis-check-aof $aof_manifest_file
+ } result
+ assert_match "*Start checking Multi Part AOF*Start to check BASE AOF (RDB format)*DB preamble is OK, proceeding with AOF tail*BASE AOF*is valid*Start to check INCR files*INCR AOF*is valid*All AOF files and manifest are valid*" $result
+ }
+
+ test {Test redis-check-aof only truncates the last file for Multi Part AOF in fix mode} {
+ create_aof $aof_dirpath $aof_base_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand multi]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ catch {
+ exec src/redis-check-aof $aof_manifest_file
+ } result
+ assert_match "*not valid*" $result
+
+ catch {
+ exec src/redis-check-aof --fix $aof_manifest_file
+ } result
+ assert_match "*Failed to truncate AOF*because it is not the last file*" $result
+ }
+
+ test {Test redis-check-aof only truncates the last file for Multi Part AOF in truncate-to-timestamp mode} {
+ create_aof $aof_dirpath $aof_base_file {
+ append_to_aof "#TS:1628217470\r\n"
+ append_to_aof [formatCommand set foo1 bar1]
+ append_to_aof "#TS:1628217471\r\n"
+ append_to_aof [formatCommand set foo2 bar2]
+ append_to_aof "#TS:1628217472\r\n"
+ append_to_aof "#TS:1628217473\r\n"
+ append_to_aof [formatCommand set foo3 bar3]
+ append_to_aof "#TS:1628217474\r\n"
+ }
+
+ create_aof $aof_dirpath $aof_file {
+ append_to_aof [formatCommand set foo hello]
+ append_to_aof [formatCommand set bar world]
+ }
+
+ create_aof_manifest $aof_dirpath $aof_manifest_file {
+ append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n"
+ append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n"
+ }
+
+ catch {
+ exec src/redis-check-aof --truncate-to-timestamp 1628217473 $aof_manifest_file
+ } result
+ assert_match "*Failed to truncate AOF*to timestamp*because it is not the last file*" $result
+ }
+
+ start_server {overrides {appendonly yes appendfsync always}} {
+ test {FLUSHDB / FLUSHALL should persist in AOF} {
+ set aof [get_last_incr_aof_path r]
+
+ r set key value
+ r flushdb
+ r set key value2
+ r flushdb
+
+ # DB is empty
+ r flushdb
+ r flushdb
+ r flushdb
+
+ r set key value
+ r flushall
+ r set key value2
+ r flushall
+
+ # DBs are empty.
+ r flushall
+ r flushall
+ r flushall
+
+ # Assert that each FLUSHDB command is persisted even when the DB is empty.
+ # Assert that each FLUSHALL command is persisted even when the DBs are empty.
+ assert_aof_content $aof {
+ {select *}
+ {set key value}
+ {flushdb}
+ {set key value2}
+ {flushdb}
+ {flushdb}
+ {flushdb}
+ {flushdb}
+ {set key value}
+ {flushall}
+ {set key value2}
+ {flushall}
+ {flushall}
+ {flushall}
+ {flushall}
+ }
+ }
+ }
+}
diff --git a/tests/integration/block-repl.tcl b/tests/integration/block-repl.tcl
new file mode 100644
index 0000000..52b4a53
--- /dev/null
+++ b/tests/integration/block-repl.tcl
@@ -0,0 +1,51 @@
+# Test replication of blocking list and zset operations.
+# Unlike stream operations, these are "pop" style operations: they consume
+# the list or sorted set, and must be replicated correctly.
+
+proc start_bg_block_op {host port db ops tls} {
+ set tclsh [info nameofexecutable]
+ exec $tclsh tests/helpers/bg_block_op.tcl $host $port $db $ops $tls &
+}
+
+proc stop_bg_block_op {handle} {
+ catch {exec /bin/kill -9 $handle}
+}
+
+start_server {tags {"repl" "external:skip"}} {
+ start_server {overrides {save {}}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ set load_handle0 [start_bg_block_op $master_host $master_port 9 100000 $::tls]
+ set load_handle1 [start_bg_block_op $master_host $master_port 9 100000 $::tls]
+ set load_handle2 [start_bg_block_op $master_host $master_port 9 100000 $::tls]
+
+ test {First server should have role slave after SLAVEOF} {
+ $slave slaveof $master_host $master_port
+ after 1000
+ s 0 role
+ } {slave}
+
+ test {Test replication with blocking lists and sorted sets operations} {
+ after 25000
+ stop_bg_block_op $load_handle0
+ stop_bg_block_op $load_handle1
+ stop_bg_block_op $load_handle2
+ wait_for_condition 100 100 {
+ [$master debug digest] == [$slave debug digest]
+ } else {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ fail "Master - Replica inconsistency, Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ }
+ }
+}
diff --git a/tests/integration/convert-ziplist-hash-on-load.tcl b/tests/integration/convert-ziplist-hash-on-load.tcl
new file mode 100644
index 0000000..c8265b2
--- /dev/null
+++ b/tests/integration/convert-ziplist-hash-on-load.tcl
@@ -0,0 +1,28 @@
+tags {"external:skip"} {
+
+# Copy RDB with ziplist encoded hash to server path
+set server_path [tmpdir "server.convert-ziplist-hash-on-load"]
+
+exec cp -f tests/assets/hash-ziplist.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "hash-ziplist.rdb"]] {
+ test "RDB load ziplist hash: converts to listpack when RDB loading" {
+ r select 0
+
+ assert_encoding listpack hash
+ assert_equal 2 [r hlen hash]
+ assert_match {v1 v2} [r hmget hash f1 f2]
+ }
+}
+
+exec cp -f tests/assets/hash-ziplist.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "hash-ziplist.rdb" "hash-max-ziplist-entries" 1]] {
+ test "RDB load ziplist hash: converts to hash table when hash-max-ziplist-entries is exceeded" {
+ r select 0
+
+ assert_encoding hashtable hash
+ assert_equal 2 [r hlen hash]
+ assert_match {v1 v2} [r hmget hash f1 f2]
+ }
+}
+
+}
diff --git a/tests/integration/convert-ziplist-zset-on-load.tcl b/tests/integration/convert-ziplist-zset-on-load.tcl
new file mode 100644
index 0000000..0fbb201
--- /dev/null
+++ b/tests/integration/convert-ziplist-zset-on-load.tcl
@@ -0,0 +1,28 @@
+tags {"external:skip"} {
+
+# Copy RDB with ziplist encoded zset to server path
+set server_path [tmpdir "server.convert-ziplist-zset-on-load"]
+
+exec cp -f tests/assets/zset-ziplist.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "zset-ziplist.rdb"]] {
+ test "RDB load ziplist zset: converts to listpack when RDB loading" {
+ r select 0
+
+ assert_encoding listpack zset
+ assert_equal 2 [r zcard zset]
+ assert_match {one 1 two 2} [r zrange zset 0 -1 withscores]
+ }
+}
+
+exec cp -f tests/assets/zset-ziplist.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "zset-ziplist.rdb" "zset-max-ziplist-entries" 1]] {
+ test "RDB load ziplist zset: converts to skiplist when zset-max-ziplist-entries is exceeded" {
+ r select 0
+
+ assert_encoding skiplist zset
+ assert_equal 2 [r zcard zset]
+ assert_match {one 1 two 2} [r zrange zset 0 -1 withscores]
+ }
+}
+
+}
diff --git a/tests/integration/convert-zipmap-hash-on-load.tcl b/tests/integration/convert-zipmap-hash-on-load.tcl
new file mode 100644
index 0000000..f7eda0e
--- /dev/null
+++ b/tests/integration/convert-zipmap-hash-on-load.tcl
@@ -0,0 +1,39 @@
+tags {"external:skip"} {
+
+# Copy RDB with zipmap encoded hash to server path
+set server_path [tmpdir "server.convert-zipmap-hash-on-load"]
+
+exec cp -f tests/assets/hash-zipmap.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb"]] {
+ test "RDB load zipmap hash: converts to listpack" {
+ r select 0
+
+ assert_match "*listpack*" [r debug object hash]
+ assert_equal 2 [r hlen hash]
+ assert_match {v1 v2} [r hmget hash f1 f2]
+ }
+}
+
+exec cp -f tests/assets/hash-zipmap.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-entries" 1]] {
+ test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded" {
+ r select 0
+
+ assert_match "*hashtable*" [r debug object hash]
+ assert_equal 2 [r hlen hash]
+ assert_match {v1 v2} [r hmget hash f1 f2]
+ }
+}
+
+exec cp -f tests/assets/hash-zipmap.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-value" 1]] {
+ test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded" {
+ r select 0
+
+ assert_match "*hashtable*" [r debug object hash]
+ assert_equal 2 [r hlen hash]
+ assert_match {v1 v2} [r hmget hash f1 f2]
+ }
+}
+
+}
diff --git a/tests/integration/corrupt-dump-fuzzer.tcl b/tests/integration/corrupt-dump-fuzzer.tcl
new file mode 100644
index 0000000..9cd4ff9
--- /dev/null
+++ b/tests/integration/corrupt-dump-fuzzer.tcl
@@ -0,0 +1,230 @@
+# tests of corrupt listpack payload with valid CRC
+
+tags {"dump" "corruption" "external:skip"} {
+
+# Catch SIGTERM so that, in case one of the random commands hangs the test
+# (usually due to redis not putting a response in the output buffers),
+# we'll know which command it was.
+if { ! [ catch {
+ package require Tclx
+} err ] } {
+ signal error SIGTERM
+}
+
+proc generate_collections {suffix elements} {
+ set rd [redis_deferring_client]
+ for {set j 0} {$j < $elements} {incr j} {
+ # add both string values and integers
+ if {$j % 2 == 0} {set val $j} else {set val "_$j"}
+ $rd hset hash$suffix $j $val
+ $rd lpush list$suffix $val
+ $rd zadd zset$suffix $j $val
+ $rd sadd set$suffix $val
+ $rd xadd stream$suffix * item 1 value $val
+ }
+ for {set j 0} {$j < $elements * 5} {incr j} {
+ $rd read ; # Discard replies
+ }
+ $rd close
+}
+
+# generate keys with various types and encodings
+proc generate_types {} {
+ r config set list-max-ziplist-size 5
+ r config set hash-max-ziplist-entries 5
+ r config set set-max-listpack-entries 5
+ r config set zset-max-ziplist-entries 5
+ r config set stream-node-max-entries 5
+
+ # create small (ziplist / listpack encoded) objects with 3 items
+ generate_collections "" 3
+
+ # add some metadata to the stream
+ r xgroup create stream mygroup 0
+ set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
+ r xdel stream [lindex [lindex [lindex [lindex $records 0] 1] 1] 0]
+ r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
+
+ # create other non-collection types
+ r incr int
+ r set string str
+
+ # create bigger objects with 10 items (more than a single ziplist / listpack)
+ generate_collections big 10
+
+ # make sure our big stream also has a listpack record with different
+ # field names than the master record
+ r xadd streambig * item 1 value 1
+ r xadd streambig * item 1 unique value
+}
+
+proc corrupt_payload {payload} {
+ set len [string length $payload]
+ set count 1 ;# usually corrupt only one byte
+ if {rand() > 0.9} { set count 2 }
+ while { $count > 0 } {
+ set idx [expr {int(rand() * $len)}]
+ set ch [binary format c [expr {int(rand()*255)}]]
+ set payload [string replace $payload $idx $idx $ch]
+ incr count -1
+ }
+ return $payload
+}
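+
+# A typical (hypothetical) use, mirroring the loop below: corrupt a DUMP
+# payload before attempting to RESTORE it into a scratch key:
+#   set bad [corrupt_payload [r dump mykey]]
+#   catch {r restore _mykey 0 $bad REPLACE}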
+
+# fuzz tester for corrupt RESTORE payloads
+# valgrind will make sure there are no leaks in the rdb loader's error handling code
+foreach sanitize_dump {no yes} {
+ if {$::accurate} {
+ set min_duration [expr {60 * 10}] ;# run at least 10 minutes
+ set min_cycles 1000 ;# run at least 1k cycles (max 16 minutes)
+ } else {
+ set min_duration 10 ; # run at least 10 seconds
+ set min_cycles 10 ; # run at least 10 cycles
+ }
+
+ # Don't execute this on FreeBSD due to a yet-undiscovered memory issue
+ # which causes tclsh to bloat.
+ if {[exec uname] == "FreeBSD"} {
+ set min_cycles 1
+ set min_duration 1
+ }
+
+ test "Fuzzer corrupt restore payloads - sanitize_dump: $sanitize_dump" {
+ if {$min_duration * 2 > $::timeout} {
+ fail "insufficient timeout"
+ }
+ # start a server, fill with data and save an RDB file once (avoid re-save)
+ start_server [list overrides [list "save" "" use-exit-on-panic yes crash-memcheck-enabled no loglevel verbose] ] {
+ set stdout [srv 0 stdout]
+ r config set sanitize-dump-payload $sanitize_dump
+ r debug set-skip-checksum-validation 1
+ set start_time [clock seconds]
+ generate_types
+ set dbsize [r dbsize]
+ r save
+ set cycle 0
+ set stat_terminated_in_restore 0
+ set stat_terminated_in_traffic 0
+ set stat_terminated_by_signal 0
+ set stat_successful_restore 0
+ set stat_rejected_restore 0
+ set stat_traffic_commands_sent 0
+ # repeatedly DUMP a random key, corrupt it and try RESTORE into a new key
+ while true {
+ set k [r randomkey]
+ set dump [r dump $k]
+ set dump [corrupt_payload $dump]
+ set printable_dump [string2printable $dump]
+ set restore_failed false
+ set report_and_restart false
+ set sent {}
+ # RESTORE can fail, but hopefully not terminate
+ if { [catch { r restore "_$k" 0 $dump REPLACE } err] } {
+ set restore_failed true
+ # skip if RESTORE failed with an error response.
+ if {[string match "ERR*" $err]} {
+ incr stat_rejected_restore
+ } else {
+ set report_and_restart true
+ incr stat_terminated_in_restore
+ write_log_line 0 "corrupt payload: $printable_dump"
+ if {$sanitize_dump == yes} {
+ puts "Server crashed in RESTORE with payload: $printable_dump"
+ }
+ }
+ } else {
+ r ping ;# check that the server didn't terminate (if it did, this throws an error that terminates the tests)
+ }
+
+ set print_commands false
+ if {!$restore_failed} {
+ # if RESTORE didn't fail or terminate, run some random traffic on the new key
+ incr stat_successful_restore
+ if { [ catch {
+ set sent [generate_fuzzy_traffic_on_key "_$k" 1] ;# traffic for 1 second
+ incr stat_traffic_commands_sent [llength $sent]
+ r del "_$k" ;# in case the server terminated, here's where we'll detect it.
+ if {$dbsize != [r dbsize]} {
+ puts "unexpected keys"
+ puts "keys: [r keys *]"
+ puts "commands leading to it:"
+ foreach cmd $sent {
+ foreach arg $cmd {
+ puts -nonewline "[string2printable $arg] "
+ }
+ puts ""
+ }
+ exit 1
+ }
+ } err ] } {
+ set err [format "%s" $err] ;# convert to string for pattern matching
+ if {[string match "*SIGTERM*" $err]} {
+ puts "payload that caused test to hang: $printable_dump"
+ exit 1
+ }
+ # if the server terminated, update stats and restart it
+ set report_and_restart true
+ incr stat_terminated_in_traffic
+ set by_signal [count_log_message 0 "crashed by signal"]
+ incr stat_terminated_by_signal $by_signal
+
+ if {$by_signal != 0 || $sanitize_dump == yes} {
+ puts "Server crashed (by signal: $by_signal), with payload: $printable_dump"
+ set print_commands true
+ }
+ }
+ }
+
+ # check valgrind report for invalid reads after each RESTORE
+ # payload so that we have a report that is easier to reproduce
+ set valgrind_errors [find_valgrind_errors [srv 0 stderr] false]
+ set asan_errors [sanitizer_errors_from_file [srv 0 stderr]]
+ if {$valgrind_errors != "" || $asan_errors != ""} {
+ puts "valgrind or asan found an issue for payload: $printable_dump"
+ set report_and_restart true
+ set print_commands true
+ }
+
+ if {$report_and_restart} {
+ if {$print_commands} {
+ puts "violating commands:"
+ foreach cmd $sent {
+ foreach arg $cmd {
+ puts -nonewline "[string2printable $arg] "
+ }
+ puts ""
+ }
+ }
+
+ # restart the server and re-apply debug configuration
+ write_log_line 0 "corrupt payload: $printable_dump"
+ restart_server 0 true true
+ r config set sanitize-dump-payload $sanitize_dump
+ r debug set-skip-checksum-validation 1
+ }
+
+ incr cycle
+ if { ([clock seconds]-$start_time) >= $min_duration && $cycle >= $min_cycles} {
+ break
+ }
+ }
+ if {$::verbose} {
+ puts "Done $cycle cycles in [expr {[clock seconds]-$start_time}] seconds."
+ puts "RESTORE: successful: $stat_successful_restore, rejected: $stat_rejected_restore"
+ puts "Total commands sent in traffic: $stat_traffic_commands_sent, crashes during traffic: $stat_terminated_in_traffic ($stat_terminated_by_signal by signal)."
+ }
+ }
+ # if we run sanitization we never expect the server to crash at runtime
+ if {$sanitize_dump == yes} {
+ assert_equal $stat_terminated_in_restore 0
+ assert_equal $stat_terminated_in_traffic 0
+ }
+        # make sure all terminations were due to an assertion and not a SIGSEGV
+ assert_equal $stat_terminated_by_signal 0
+ }
+}
+
+
+
+} ;# tags
+
diff --git a/tests/integration/corrupt-dump.tcl b/tests/integration/corrupt-dump.tcl
new file mode 100644
index 0000000..3c9e5ce
--- /dev/null
+++ b/tests/integration/corrupt-dump.tcl
@@ -0,0 +1,833 @@
+# tests of corrupt ziplist payload with valid CRC
+# * setting crash-memcheck-enabled to no to avoid issues with valgrind
+# * setting use-exit-on-panic to yes so that valgrind can search for leaks
+# * setting debug set-skip-checksum-validation to 1 on some tests for which we
+# didn't bother to fake a valid checksum
+# * some tests set sanitize-dump-payload to no and some to yes, depending on
+# what we want to test
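+#
+# Most tests below share the same skeleton; a minimal sketch (not an actual
+# test from this file, and the payload is only a placeholder):
+#
+#   start_server [list overrides [list loglevel verbose use-exit-on-panic yes \
+#       crash-memcheck-enabled no]] {
+#       r config set sanitize-dump-payload yes ;# or no, depending on the test
+#       r debug set-skip-checksum-validation 1
+#       catch {r restore key 0 $some_corrupt_payload} err
+#       assert_match "*Bad data format*" $err
+#   }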
+
+tags {"dump" "corruption" "external:skip"} {
+
+# We only run OOM related tests on x86_64 and aarch64, as jemalloc on other
+# platforms (notably s390x) may actually succeed in satisfying very large
+# allocations. As a result the test may hang for a very long time in the
+# cleanup phase, iterating over as many as 2^61 hash table slots.
+
+set arch_name [exec uname -m]
+set run_oom_tests [expr {$arch_name == "x86_64" || $arch_name == "aarch64"}]
+
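+# A note on the binary payloads used throughout this file: the output of DUMP
+# (and thus the input of RESTORE) is a one-byte RDB object type, the
+# serialized value, a two-byte RDB version (e.g. "\x09\x00") and a trailing
+# 8-byte CRC64 checksum. In the payload below, "\x0E" is the quicklist type
+# and the final eight bytes are the checksum.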
+set corrupt_payload_7445 "\x0E\x01\x1D\x1D\x00\x00\x00\x16\x00\x00\x00\x03\x00\x00\x04\x43\x43\x43\x43\x06\x04\x42\x42\x42\x42\x06\x3F\x41\x41\x41\x41\xFF\x09\x00\x88\xA5\xCA\xA8\xC5\x41\xF4\x35"
+
+test {corrupt payload: #7445 - with sanitize} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ catch {
+ r restore key 0 $corrupt_payload_7445
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: hash with valid zip list header, invalid entry len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ catch {
+ r restore key 0 "\x0D\x1B\x1B\x00\x00\x00\x16\x00\x00\x00\x04\x00\x00\x02\x61\x00\x04\x02\x62\x00\x04\x14\x63\x00\x04\x02\x64\x00\xFF\x09\x00\xD9\x10\x54\x92\x15\xF5\x5F\x52"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: invalid zlbytes header} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ catch {
+ r restore key 0 "\x0D\x1B\x25\x00\x00\x00\x16\x00\x00\x00\x04\x00\x00\x02\x61\x00\x04\x02\x62\x00\x04\x02\x63\x00\x04\x02\x64\x00\xFF\x09\x00\xB7\xF7\x6E\x9F\x43\x43\x14\xC6"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: valid zipped hash header, dup records} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ catch {
+ r restore key 0 "\x0D\x1B\x1B\x00\x00\x00\x16\x00\x00\x00\x04\x00\x00\x02\x61\x00\x04\x02\x62\x00\x04\x02\x61\x00\x04\x02\x64\x00\xFF\x09\x00\xA1\x98\x36\x78\xCC\x8E\x93\x2E"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: quicklist big ziplist prev len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch {r restore key 0 "\x0E\x01\x13\x13\x00\x00\x00\x0E\x00\x00\x00\x02\x00\x00\x02\x61\x00\x0E\x02\x62\x00\xFF\x09\x00\x49\x97\x30\xB2\x0D\xA1\xED\xAA"} err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: quicklist small ziplist prev len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ catch {
+ r restore key 0 "\x0E\x01\x13\x13\x00\x00\x00\x0E\x00\x00\x00\x02\x00\x00\x02\x61\x00\x02\x02\x62\x00\xFF\x09\x00\xC7\x71\x03\x97\x07\x75\xB0\x63"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: quicklist ziplist wrong count} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch {r restore key 0 "\x0E\x01\x13\x13\x00\x00\x00\x0E\x00\x00\x00\x03\x00\x00\x02\x61\x00\x04\x02\x62\x00\xFF\x09\x00\x4D\xE2\x0A\x2F\x08\x25\xDF\x91"} err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: #3080 - quicklist} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch {
+ r RESTORE key 0 "\x0E\x01\x80\x00\x00\x00\x10\x41\x41\x41\x41\x41\x41\x41\x41\x02\x00\x00\x80\x41\x41\x41\x41\x07\x00\x03\xC7\x1D\xEF\x54\x68\xCC\xF3"
+ r DUMP key ;# DUMP was used in the original issue, but now even with shallow sanitization restore safely fails, so this is dead code
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: quicklist with empty ziplist} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r restore key 0 "\x0E\x01\x0B\x0B\x00\x00\x00\x0A\x00\x00\x00\x00\x00\xFF\x09\x00\xC2\x69\x37\x83\x3C\x7F\xFE\x6F" replace} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: quicklist encoded_len is 0} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ catch { r restore _list 0 "\x12\x01\x01\x00\x0a\x00\x8f\xc6\xc0\x57\x1c\x0a\xb3\x3c" replace } err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: quicklist listpack entry start with EOF} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ catch { r restore _list 0 "\x12\x01\x02\x0b\x0b\x00\x00\x00\x01\x00\x81\x61\x02\xff\xff\x0a\x00\x7e\xd8\xde\x5b\x0d\xd7\x70\xb8" replace } err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: #3080 - ziplist} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ # shallow sanitization is enough for restore to safely reject the payload with wrong size
+ r config set sanitize-dump-payload no
+ catch {
+ r RESTORE key 0 "\x0A\x80\x00\x00\x00\x10\x41\x41\x41\x41\x41\x41\x41\x41\x02\x00\x00\x80\x41\x41\x41\x41\x07\x00\x39\x5B\x49\xE0\xC1\xC6\xDD\x76"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: load corrupted rdb with no CRC - #3505} {
+ set server_path [tmpdir "server.rdb-corruption-test"]
+ exec cp tests/assets/corrupt_ziplist.rdb $server_path
+ set srv [start_server [list overrides [list "dir" $server_path "dbfilename" "corrupt_ziplist.rdb" loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no sanitize-dump-payload no]]]
+
+ # wait for termination
+ wait_for_condition 100 50 {
+ ! [is_alive $srv]
+ } else {
+ fail "rdb loading didn't fail"
+ }
+
+ set stdout [dict get $srv stdout]
+ assert_equal [count_message_lines $stdout "Terminating server after rdb file reading failure."] 1
+ assert_lessthan 1 [count_message_lines $stdout "integrity check failed"]
+ kill_server $srv ;# let valgrind look for issues
+}
+
+foreach sanitize_dump {no yes} {
+ test {corrupt payload: load corrupted rdb with empty keys} {
+ set server_path [tmpdir "server.rdb-corruption-empty-keys-test"]
+ exec cp tests/assets/corrupt_empty_keys.rdb $server_path
+ start_server [list overrides [list "dir" $server_path "dbfilename" "corrupt_empty_keys.rdb" "sanitize-dump-payload" $sanitize_dump]] {
+ r select 0
+ assert_equal [r dbsize] 0
+
+ verify_log_message 0 "*skipping empty key: set*" 0
+ verify_log_message 0 "*skipping empty key: list_quicklist*" 0
+ verify_log_message 0 "*skipping empty key: list_quicklist_empty_ziplist*" 0
+ verify_log_message 0 "*skipping empty key: list_ziplist*" 0
+ verify_log_message 0 "*skipping empty key: hash*" 0
+ verify_log_message 0 "*skipping empty key: hash_ziplist*" 0
+ verify_log_message 0 "*skipping empty key: zset*" 0
+ verify_log_message 0 "*skipping empty key: zset_ziplist*" 0
+ verify_log_message 0 "*skipping empty key: zset_listpack*" 0
+ verify_log_message 0 "*empty keys skipped: 9*" 0
+ }
+ }
+}
+
+test {corrupt payload: listpack invalid size header} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch {
+ r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x5F\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x88\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x45\x91\x0A\x87\x2F\xA5\xF9\x2E"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*Stream listpack integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: listpack too long entry len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch {
+ r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x89\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x88\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x40\x63\xC9\x37\x03\xA2\xE5\x68"
+ } err
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: listpack very long entry len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch {
+ # This will catch migrated payloads from v6.2.x
+ r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x9C\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x63\x6F\x42\x8E\x7C\xB5\xA2\x9D"
+ } err
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: listpack too long entry prev len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ catch {
+ r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x15\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x88\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x06\xFB\x44\x24\x0A\x8E\x75\xEA"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*Stream listpack integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: stream with duplicate consumers} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ catch {
+ r restore key 0 "\x0F\x00\x00\x00\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x00\x00\x00\x02\x04\x6E\x61\x6D\x65\x2A\x4C\xAA\x9A\x7D\x01\x00\x00\x00\x04\x6E\x61\x6D\x65\x2B\x4C\xAA\x9A\x7D\x01\x00\x00\x00\x0A\x00\xCC\xED\x8C\xA7\x62\xEE\xC7\xC8"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*Duplicate stream consumer detected*" 0
+ r ping
+ }
+}
+
+test {corrupt payload: hash ziplist with duplicate records} {
+ # when we do perform full sanitization, we expect duplicate records to fail the restore
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _hash 0 "\x0D\x3D\x3D\x00\x00\x00\x3A\x00\x00\x00\x14\x13\x00\xF5\x02\xF5\x02\xF2\x02\x53\x5F\x31\x04\xF3\x02\xF3\x02\xF7\x02\xF7\x02\xF8\x02\x02\x5F\x37\x04\xF1\x02\xF1\x02\xF6\x02\x02\x5F\x35\x04\xF4\x02\x02\x5F\x33\x04\xFA\x02\x02\x5F\x39\x04\xF9\x02\xF9\xFF\x09\x00\xB5\x48\xDE\x62\x31\xD0\xE5\x63" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: hash listpack with duplicate records} {
+ # when we do perform full sanitization, we expect duplicate records to fail the restore
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _hash 0 "\x10\x17\x17\x00\x00\x00\x04\x00\x82\x61\x00\x03\x82\x62\x00\x03\x82\x61\x00\x03\x82\x64\x00\x03\xff\x0a\x00\xc0\xcf\xa6\x87\xe5\xa7\xc5\xbe" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: hash listpack with duplicate records - convert} {
+    # when we do NOT perform full sanitization but do convert to a hash, we expect duplicate records to trigger a panic
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r config set hash-max-listpack-entries 1
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _hash 0 "\x10\x17\x17\x00\x00\x00\x04\x00\x82\x61\x00\x03\x82\x62\x00\x03\x82\x61\x00\x03\x82\x64\x00\x03\xff\x0a\x00\xc0\xcf\xa6\x87\xe5\xa7\xc5\xbe" } err
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "listpack with dup elements"] 1
+ }
+}
+
+test {corrupt payload: hash ziplist uneven record count} {
+    # even when we do NOT perform full sanitization, shallow sanitization can detect an uneven entry count
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _hash 0 "\r\x1b\x1b\x00\x00\x00\x16\x00\x00\x00\x04\x00\x00\x02a\x00\x04\x02b\x00\x04\x02a\x00\x04\x02d\x00\xff\t\x00\xa1\x98\x36x\xcc\x8e\x93\x2e" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: hash duplicate records} {
+ # when we do perform full sanitization, we expect duplicate records to fail the restore
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _hash 0 "\x04\x02\x01a\x01b\x01a\x01d\t\x00\xc6\x9c\xab\xbc\bk\x0c\x06" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: hash empty zipmap} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _hash 0 "\x09\x02\x00\xFF\x09\x00\xC0\xF1\xB8\x67\x4C\x16\xAC\xE3" } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*Zipmap integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - NPD in streamIteratorGetID} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE key 0 "\x0F\x01\x10\x00\x00\x01\x73\xBD\x68\x48\x71\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x03\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x02\x01\x00\x01\x01\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x00\x01\x02\x01\x01\x01\x02\x01\x48\x01\xFF\x03\x81\x00\x00\x01\x73\xBD\x68\x48\x71\x02\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x73\xBD\x68\x48\x71\x00\x01\x00\x00\x01\x73\xBD\x68\x48\x71\x00\x00\x00\x00\x00\x00\x00\x00\x72\x48\x68\xBD\x73\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x72\x48\x68\xBD\x73\x01\x00\x00\x01\x00\x00\x01\x73\xBD\x68\x48\x71\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x80\xCD\xB0\xD5\x1A\xCE\xFF\x10"
+ r XREVRANGE key 725 233
+ }
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - listpack NPD on invalid stream} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE _stream 0 "\x0F\x01\x10\x00\x00\x01\x73\xDC\xB6\x6B\xF1\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x03\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x02\x01\x1F\x01\x00\x01\x01\x01\x6D\x5F\x31\x03\x05\x01\x02\x01\x29\x01\x00\x01\x01\x01\x02\x01\x05\x01\xFF\x03\x81\x00\x00\x01\x73\xDC\xB6\x6C\x1A\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x73\xDC\xB6\x6B\xF1\x00\x01\x00\x00\x01\x73\xDC\xB6\x6B\xF1\x00\x00\x00\x00\x00\x00\x00\x00\x4B\x6C\xB6\xDC\x73\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x3D\x6C\xB6\xDC\x73\x01\x00\x00\x01\x00\x00\x01\x73\xDC\xB6\x6B\xF1\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xC7\x7D\x1C\xD7\x04\xFF\xE6\x9D"
+ r XREAD STREAMS _stream 519389898758
+ }
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - NPD in quicklistIndex} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE key 0 "\x0E\x01\x13\x13\x00\x00\x00\x10\x00\x00\x00\x03\x12\x00\xF3\x02\x02\x5F\x31\x04\xF1\xFF\x09\x00\xC9\x4B\x31\xFE\x61\xC0\x96\xFE"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - encoded entry header reach outside the allocation} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE key 0 "\x0D\x19\x19\x00\x00\x00\x16\x00\x00\x00\x06\x00\x00\xF1\x02\xF1\x02\xF2\x02\x02\x5F\x31\x04\x99\x02\xF3\xFF\x09\x00\xC5\xB8\x10\xC0\x8A\xF9\x16\xDF"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+
+test {corrupt payload: fuzzer findings - invalid ziplist encoding} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE _listbig 0 "\x0E\x02\x1B\x1B\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\x02\x5F\x39\x04\xF9\x02\x86\x5F\x37\x04\xF7\x02\x02\x5F\x35\xFF\x19\x19\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\xF5\x02\x02\x5F\x33\x04\xF3\x02\x02\x5F\x31\x04\xF1\xFF\x09\x00\x0C\xFC\x99\x2C\x23\x45\x15\x60"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - hash crash} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ r RESTORE _hash 0 "\x0D\x19\x19\x00\x00\x00\x16\x00\x00\x00\x06\x00\x00\xF1\x02\xF1\x02\xF2\x02\x02\x5F\x31\x04\xF3\x02\xF3\xFF\x09\x00\x38\xB8\x10\xC0\x8A\xF9\x16\xDF"
+ r HSET _hash 394891450 1635910264
+ r HMGET _hash 887312884855
+ }
+}
+
+test {corrupt payload: fuzzer findings - uneven entry count in hash} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE _hashbig 0 "\x0D\x3D\x3D\x00\x00\x00\x38\x00\x00\x00\x14\x00\x00\xF2\x02\x02\x5F\x31\x04\x1C\x02\xF7\x02\xF1\x02\xF1\x02\xF5\x02\xF5\x02\xF4\x02\x02\x5F\x33\x04\xF6\x02\x02\x5F\x35\x04\xF8\x02\x02\x5F\x37\x04\xF9\x02\xF9\x02\xF3\x02\xF3\x02\xFA\x02\x02\x5F\x39\xFF\x09\x00\x73\xB7\x68\xC8\x97\x24\x8E\x88"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - invalid read in lzf_decompress} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _setbig 0 "\x02\x03\x02\x5F\x31\xC0\x02\xC3\x00\x09\x00\xE6\xDC\x76\x44\xFF\xEB\x3D\xFE" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: fuzzer findings - leak in rdbloading due to dup entry in set} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _setbig 0 "\x02\x0A\x02\x5F\x39\xC0\x06\x02\x5F\x31\xC0\x00\xC0\x04\x02\x5F\x35\xC0\x02\xC0\x08\x02\x5F\x31\x02\x5F\x33\x09\x00\x7A\x5A\xFB\x90\x3A\xE9\x3C\xBE" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: fuzzer findings - empty intset} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _setbig 0 "\x02\xC0\xC0\x06\x02\x5F\x39\xC0\x02\x02\x5F\x33\xC0\x00\x02\x5F\x31\xC0\x04\xC0\x08\x02\x5F\x37\x02\x5F\x35\x09\x00\xC5\xD4\x6D\xBA\xAD\x14\xB7\xE7"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - zset ziplist entry lensize is 0} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _zsetbig 0 "\x0C\x3D\x3D\x00\x00\x00\x3A\x00\x00\x00\x14\x00\x00\xF1\x02\xF1\x02\x02\x5F\x31\x04\xF2\x02\xF3\x02\xF3\x02\x02\x5F\x33\x04\xF4\x02\xEE\x02\xF5\x02\x02\x5F\x35\x04\xF6\x02\xF7\x02\xF7\x02\x02\x5F\x37\x04\xF8\x02\xF9\x02\xF9\x02\x02\x5F\x39\x04\xFA\xFF\x09\x00\xAE\xF9\x77\x2A\x47\x24\x33\xF6"} err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*Zset ziplist integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - valgrind ziplist prevlen reaches outside the ziplist} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _listbig 0 "\x0E\x02\x1B\x1B\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\x02\x5F\x39\x04\xF9\x02\x02\x5F\x37\x04\xF7\x02\x02\x5F\x35\xFF\x19\x19\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\xF5\x02\x02\x5F\x33\x04\xF3\x95\x02\x5F\x31\x04\xF1\xFF\x09\x00\x0C\xFC\x99\x2C\x23\x45\x15\x60"} err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - valgrind - bad rdbLoadDoubleValue} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _list 0 "\x03\x01\x11\x11\x00\x00\x00\x0A\x00\x00\x00\x01\x00\x00\xD0\x07\x1A\xE9\x02\xFF\x09\x00\x1A\x06\x07\x32\x41\x28\x3A\x46" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: fuzzer findings - valgrind ziplist prev too big} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _list 0 "\x0E\x01\x13\x13\x00\x00\x00\x10\x00\x00\x00\x03\x00\x00\xF3\x02\x02\x5F\x31\xC1\xF1\xFF\x09\x00\xC9\x4B\x31\xFE\x61\xC0\x96\xFE"} err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - lzf decompression fails, avoid valgrind invalid read} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _stream 0 "\x0F\x02\x10\x00\x00\x01\x73\xDD\xAA\x2A\xB9\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4B\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x07\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x40\x00\x00\x02\x60\x19\x40\x27\x40\x19\x00\x33\x60\x19\x40\x29\x02\x01\x01\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x73\xDD\xAA\x2A\xBC\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4D\x40\x5E\x18\x5E\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x17\x0B\x03\x01\x01\x06\x01\x40\x0B\x00\x01\x60\x0D\x02\x82\x5F\x37\x60\x19\x80\x00\x00\x08\x60\x19\x80\x27\x02\x82\x5F\x39\x20\x19\x00\xFF\x0A\x81\x00\x00\x01\x73\xDD\xAA\x2A\xBE\x00\x00\x09\x00\x21\x85\x77\x43\x71\x7B\x17\x88"} err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream bad lp_count} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _stream 0 "\x0F\x01\x10\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x03\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x56\x01\x02\x01\x22\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x2C\x01\x00\x01\x01\x01\x02\x01\x05\x01\xFF\x03\x81\x00\x00\x01\x73\xDE\xDF\x7D\xC7\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x01\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x00\x00\x00\x00\x00\x00\x00\xF9\x7D\xDF\xDE\x73\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\xEB\x7D\xDF\xDE\x73\x01\x00\x00\x01\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xB2\xA8\xA7\x5F\x1B\x61\x72\xD5"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream bad lp_count - unsanitized} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r RESTORE _stream 0 "\x0F\x01\x10\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x03\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x56\x01\x02\x01\x22\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x2C\x01\x00\x01\x01\x01\x02\x01\x05\x01\xFF\x03\x81\x00\x00\x01\x73\xDE\xDF\x7D\xC7\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x01\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x00\x00\x00\x00\x00\x00\x00\xF9\x7D\xDF\xDE\x73\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\xEB\x7D\xDF\xDE\x73\x01\x00\x00\x01\x00\x00\x01\x73\xDE\xDF\x7D\x9B\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xB2\xA8\xA7\x5F\x1B\x61\x72\xD5"
+ catch { r XREVRANGE _stream 638932639 738}
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream integrity check issue} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE _stream 0 "\x0F\x02\x10\x00\x00\x01\x75\x2D\xA2\x90\x67\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4F\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x4A\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x09\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x00\x02\x20\x0D\x00\x02\xA0\x19\x00\x03\x20\x0B\x02\x82\x5F\x33\xA0\x19\x00\x04\x20\x0D\x00\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x75\x2D\xA2\x90\x67\x00\x00\x00\x00\x00\x00\x00\x05\xC3\x40\x56\x40\x60\x18\x60\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x40\x0B\x03\x01\x01\x06\x01\x80\x0B\x00\x02\x20\x0B\x02\x82\x5F\x37\x60\x19\x03\x01\x01\xDF\xFB\x20\x05\x00\x08\x60\x1A\x20\x0C\x00\xFC\x20\x05\x02\x82\x5F\x39\x20\x1B\x00\xFF\x0A\x81\x00\x00\x01\x75\x2D\xA2\x90\x68\x01\x00\x09\x00\x1D\x6F\xC0\x69\x8A\xDE\xF7\x92" } err
+ assert_match "*Bad data format*" $err
+ }
+}
+
+test {corrupt payload: fuzzer findings - infinite loop} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r RESTORE _stream 0 "\x0F\x01\x10\x00\x00\x01\x75\x3A\xA6\xD0\x93\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x03\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x02\x01\x00\x01\x01\x01\x01\x01\x82\x5F\x31\x03\xFD\x01\x02\x01\x00\x01\x02\x01\x01\x01\x02\x01\x05\x01\xFF\x03\x81\x00\x00\x01\x75\x3A\xA6\xD0\x93\x02\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x75\x3A\xA6\xD0\x93\x00\x01\x00\x00\x01\x75\x3A\xA6\xD0\x93\x00\x00\x00\x00\x00\x00\x00\x00\x94\xD0\xA6\x3A\x75\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x94\xD0\xA6\x3A\x75\x01\x00\x00\x01\x00\x00\x01\x75\x3A\xA6\xD0\x93\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xC4\x09\xAD\x69\x7E\xEE\xA6\x2F"
+ catch { r XREVRANGE _stream 288270516 971031845 }
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - hash ziplist too long entry len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ catch {
+ r RESTORE _hash 0 "\x0D\x3D\x3D\x00\x00\x00\x3A\x00\x00\x00\x14\x13\x00\xF5\x02\xF5\x02\xF2\x02\x53\x5F\x31\x04\xF3\x02\xF3\x02\xF7\x02\xF7\x02\xF8\x02\x02\x5F\x37\x04\xF1\x02\xF1\x02\xF6\x02\x02\x5F\x35\x04\xF4\x02\x02\x5F\x33\x04\xFA\x02\x02\x5F\x39\x04\xF9\x02\xF9\xFF\x09\x00\xB5\x48\xDE\x62\x31\xD0\xE5\x63"
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+if {$run_oom_tests} {
+
+test {corrupt payload: OOM in rdbGenericLoadStringObject} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ catch { r RESTORE x 0 "\x0A\x81\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x13\x00\x00\x00\x0E\x00\x00\x00\x02\x00\x00\x02\x61\x00\x04\x02\x62\x00\xFF\x09\x00\x57\x04\xE5\xCD\xD4\x37\x6C\x57" } err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - OOM in dictExpand} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch { r RESTORE x 0 "\x02\x81\x02\x5F\x31\xC0\x00\xC0\x02\x09\x00\xCD\x84\x2C\xB7\xE8\xA4\x49\x57" } err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+}
+
+test {corrupt payload: fuzzer findings - zset ziplist invalid tail offset} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _zset 0 "\x0C\x19\x19\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\xF1\x02\xF1\x02\x02\x5F\x31\x04\xF2\x02\xF3\x02\xF3\xFF\x09\x00\x4D\x72\x7B\x97\xCD\x9A\x70\xC1"} err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*Zset ziplist integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - negative reply length} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r RESTORE _stream 0 "\x0F\x01\x10\x00\x00\x01\x75\xCF\xA1\x16\xA7\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x03\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x02\x01\x00\x01\x01\x01\x01\x01\x14\x5F\x31\x03\x05\x01\x02\x01\x00\x01\x02\x01\x01\x01\x02\x01\x05\x01\xFF\x03\x81\x00\x00\x01\x75\xCF\xA1\x16\xA7\x02\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x75\xCF\xA1\x16\xA7\x01\x01\x00\x00\x01\x75\xCF\xA1\x16\xA7\x00\x00\x00\x00\x00\x00\x00\x01\xA7\x16\xA1\xCF\x75\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\xA7\x16\xA1\xCF\x75\x01\x00\x00\x01\x00\x00\x01\x75\xCF\xA1\x16\xA7\x00\x00\x00\x00\x00\x00\x00\x01\x09\x00\x1B\x42\x52\xB8\xDD\x5C\xE5\x4E"
+ catch {r XADD _stream * -956 -2601503852}
+ catch {r XINFO STREAM _stream FULL}
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - valgrind negative malloc} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _key 0 "\x0E\x01\x81\xD6\xD6\x00\x00\x00\x0A\x00\x00\x00\x01\x00\x00\x40\xC8\x6F\x2F\x36\xE2\xDF\xE3\x2E\x26\x64\x8B\x87\xD1\x7A\xBD\xFF\xEF\xEF\x63\x65\xF6\xF8\x8C\x4E\xEC\x96\x89\x56\x88\xF8\x3D\x96\x5A\x32\xBD\xD1\x36\xD8\x02\xE6\x66\x37\xCB\x34\x34\xC4\x52\xA7\x2A\xD5\x6F\x2F\x7E\xEE\xA2\x94\xD9\xEB\xA9\x09\x38\x3B\xE1\xA9\x60\xB6\x4E\x09\x44\x1F\x70\x24\xAA\x47\xA8\x6E\x30\xE1\x13\x49\x4E\xA1\x92\xC4\x6C\xF0\x35\x83\xD9\x4F\xD9\x9C\x0A\x0D\x7A\xE7\xB1\x61\xF5\xC1\x2D\xDC\xC3\x0E\x87\xA6\x80\x15\x18\xBA\x7F\x72\xDD\x14\x75\x46\x44\x0B\xCA\x9C\x8F\x1C\x3C\xD7\xDA\x06\x62\x18\x7E\x15\x17\x24\xAB\x45\x21\x27\xC2\xBC\xBB\x86\x6E\xD8\xBD\x8E\x50\xE0\xE0\x88\xA4\x9B\x9D\x15\x2A\x98\xFF\x5E\x78\x6C\x81\xFC\xA8\xC9\xC8\xE6\x61\xC8\xD1\x4A\x7F\x81\xD6\xA6\x1A\xAD\x4C\xC1\xA2\x1C\x90\x68\x15\x2A\x8A\x36\xC0\x58\xC3\xCC\xA6\x54\x19\x12\x0F\xEB\x46\xFF\x6E\xE3\xA7\x92\xF8\xFF\x09\x00\xD0\x71\xF7\x9F\xF7\x6A\xD6\x2E"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - valgrind invalid read} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _key 0 "\x05\x0A\x02\x5F\x39\x00\x00\x00\x00\x00\x00\x22\x40\xC0\x08\x00\x00\x00\x00\x00\x00\x20\x40\x02\x5F\x37\x00\x00\x00\x00\x00\x00\x1C\x40\xC0\x06\x00\x00\x00\x00\x00\x00\x18\x40\x02\x5F\x33\x00\x00\x00\x00\x00\x00\x14\x40\xC0\x04\x00\x00\x00\x00\x00\x00\x10\x40\x02\x5F\x33\x00\x00\x00\x00\x00\x00\x08\x40\xC0\x02\x00\x00\x00\x00\x00\x00\x00\x40\x02\x5F\x31\x00\x00\x00\x00\x00\x00\xF0\x3F\xC0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x3C\x66\xD7\x14\xA9\xDA\x3C\x69"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - empty hash ziplist} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r RESTORE _int 0 "\x04\xC0\x01\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream with no records} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x78\x4D\x55\x68\x09\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x3E\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x50\x01\x00\x01\x01\x01\x02\x01\x05\x23\xFF\x02\x81\x00\x00\x01\x78\x4D\x55\x68\x59\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x78\x4D\x55\x68\x47\x00\x01\x00\x00\x01\x78\x4D\x55\x68\x47\x00\x00\x00\x00\x00\x00\x00\x00\x9F\x68\x55\x4D\x78\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x85\x68\x55\x4D\x78\x01\x00\x00\x01\x00\x00\x01\x78\x4D\x55\x68\x47\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xF1\xC0\x72\x70\x39\x40\x1E\xA9" replace
+ catch {r XREAD STREAMS _stream $}
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "Guru Meditation"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - quicklist ziplist tail followed by extra data which start with 0xff} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {
+ r restore key 0 "\x0E\x01\x11\x11\x00\x00\x00\x0A\x00\x00\x00\x01\x00\x00\xF6\xFF\xB0\x6C\x9C\xFF\x09\x00\x9C\x37\x47\x49\x4D\xDE\x94\xF5" replace
+ } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - dict init to huge size} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r restore key 0 "\x02\x81\xC0\x00\x02\x5F\x31\xC0\x02\x09\x00\xB2\x1B\xE5\x17\x2E\x15\xF4\x6C" replace} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - huge string} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore key 0 "\x00\x81\x01\x09\x00\xF6\x2B\xB6\x7A\x85\x87\x72\x4D"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream PEL without consumer} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x08\xF0\xB2\x34\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x3B\x40\x42\x19\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x20\x10\x00\x00\x20\x01\x00\x01\x20\x03\x02\x05\x01\x03\x20\x05\x40\x00\x04\x82\x5F\x31\x03\x05\x60\x19\x80\x32\x02\x05\x01\xFF\x02\x81\x00\x00\x01\x7B\x08\xF0\xB2\x34\x02\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x08\xF0\xB2\x34\x01\x01\x00\x00\x01\x7B\x08\xF0\xB2\x34\x00\x00\x00\x00\x00\x00\x00\x01\x35\xB2\xF0\x08\x7B\x01\x00\x00\x01\x01\x13\x41\x6C\x69\x63\x65\x35\xB2\xF0\x08\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x08\xF0\xB2\x34\x00\x00\x00\x00\x00\x00\x00\x01\x09\x00\x28\x2F\xE0\xC5\x04\xBB\xA7\x31"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream listpack valgrind issue} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x09\x5E\x94\xFF\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x25\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x32\x01\x00\x01\x01\x01\x02\x01\xF0\x01\xFF\x02\x81\x00\x00\x01\x7B\x09\x5E\x95\x31\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x09\x5E\x95\x24\x00\x01\x00\x00\x01\x7B\x09\x5E\x95\x24\x00\x00\x00\x00\x00\x00\x00\x00\x5C\x95\x5E\x09\x7B\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x4B\x95\x5E\x09\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x09\x5E\x95\x24\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\x19\x29\x94\xDF\x76\xF8\x1A\xC6"
+ catch {r XINFO STREAM _stream FULL }
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream with bad lpFirst} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x0E\x52\xD2\xEC\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\xF7\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x01\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x01\x01\x01\x01\x01\x01\x02\x01\x05\x01\xFF\x02\x81\x00\x00\x01\x7B\x0E\x52\xD2\xED\x01\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x0E\x52\xD2\xED\x00\x01\x00\x00\x01\x7B\x0E\x52\xD2\xED\x00\x00\x00\x00\x00\x00\x00\x00\xED\xD2\x52\x0E\x7B\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\xED\xD2\x52\x0E\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x0E\x52\xD2\xED\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xAC\x05\xC9\x97\x5D\x45\x80\xB3"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream listpack lpPrev valgrind issue} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ r restore _stream 0 "\x0F\x01\x10\x00\x00\x01\x7B\x0E\xAE\x66\x36\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x1D\x01\x03\x01\x24\x01\x00\x01\x01\x69\x82\x5F\x31\x03\x05\x01\x02\x01\x33\x01\x00\x01\x01\x01\x02\x01\x05\x01\xFF\x02\x81\x00\x00\x01\x7B\x0E\xAE\x66\x69\x00\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x7B\x0E\xAE\x66\x5A\x00\x01\x00\x00\x01\x7B\x0E\xAE\x66\x5A\x00\x00\x00\x00\x00\x00\x00\x00\x94\x66\xAE\x0E\x7B\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x83\x66\xAE\x0E\x7B\x01\x00\x00\x01\x00\x00\x01\x7B\x0E\xAE\x66\x5A\x00\x00\x00\x00\x00\x00\x00\x00\x09\x00\xD5\xD7\xA5\x5C\x63\x1C\x09\x40"
+ catch {r XREVRANGE _stream 1618622681 606195012389}
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream with non-integer entry id} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore _streambig 0 "\x0F\x03\x10\x00\x00\x01\x7B\x13\x34\xC3\xB2\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4F\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x80\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x09\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x00\x02\x20\x0D\x00\x02\xA0\x19\x00\x03\x20\x0B\x02\x82\x5F\x33\xA0\x19\x00\x04\x20\x0D\x00\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x7B\x13\x34\xC3\xB2\x00\x00\x00\x00\x00\x00\x00\x05\xC3\x40\x56\x40\x61\x18\x61\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x40\x0B\x03\x01\x01\x06\x01\x40\x0B\x03\x01\x01\xDF\xFB\x20\x05\x02\x82\x5F\x37\x60\x1A\x20\x0E\x00\xFC\x20\x05\x00\x08\xC0\x1B\x00\xFD\x20\x0C\x02\x82\x5F\x39\x20\x1B\x00\xFF\x10\x00\x00\x01\x7B\x13\x34\xC3\xB3\x00\x00\x00\x00\x00\x00\x00\x03\xC3\x3D\x40\x4A\x18\x4A\x00\x00\x00\x15\x00\x02\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x40\x00\x00\x05\x60\x07\x02\xDF\xFD\x02\xC0\x23\x09\x01\x01\x86\x75\x6E\x69\x71\x75\x65\x07\xA0\x2D\x02\x08\x01\xFF\x0C\x81\x00\x00\x01\x7B\x13\x34\xC3\xB4\x00\x00\x09\x00\x9D\xBD\xD5\xB9\x33\xC4\xC5\xFF"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - empty quicklist} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {
+ r restore key 0 "\x0E\xC0\x2B\x15\x00\x00\x00\x0A\x00\x00\x00\x01\x00\x00\xE0\x62\x58\xEA\xDF\x22\x00\x00\x00\xFF\x09\x00\xDF\x35\xD2\x67\xDC\x0E\x89\xAB" replace
+ } err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - empty zset} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore key 0 "\x05\xC0\x01\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - hash with len of 0} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore key 0 "\x04\xC0\x21\x09\x00\xF6\x8A\xB6\x7A\x85\x87\x72\x4D"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - hash listpack first element too long entry len} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ r config set sanitize-dump-payload yes
+ catch { r restore _hash 0 "\x10\x15\x15\x00\x00\x00\x06\x00\xF0\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x02\x01\x02\x01\xFF\x0A\x00\x94\x21\x0A\xFA\x06\x52\x9F\x44" replace } err
+ assert_match "*Bad data format*" $err
+ verify_log_message 0 "*integrity check failed*" 0
+ }
+}
+
+test {corrupt payload: fuzzer findings - stream double free listpack when insert dup node to rax returns 0} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ r config set sanitize-dump-payload yes
+ catch { r restore _stream 0 "\x0F\x03\x10\x00\x00\x01\x7B\x60\x5A\x23\x79\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4F\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x09\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x00\x02\x20\x0D\x00\x02\xA0\x19\x00\x03\x20\x0B\x02\x82\x5F\x33\xA0\x19\x00\x04\x20\x0D\x00\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x7B\x60\x5A\x23\x79\x00\x00\x00\x00\x00\x00\x00\x05\xC3\x40\x51\x40\x5E\x18\x5E\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x40\x0B\x03\x01\x01\x06\x01\x80\x0B\x00\x02\x20\x0B\x02\x82\x5F\x37\xA0\x19\x00\x03\x20\x0D\x00\x08\xA0\x19\x00\x04\x20\x0B\x02\x82\x5F\x39\x20\x19\x00\xFF\x10\x00\x00\x01\x7B\x60\x5A\x23\x79\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x3B\x40\x49\x18\x49\x00\x00\x00\x15\x00\x02\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x40\x00\x00\x05\x20\x07\x40\x09\xC0\x22\x09\x01\x01\x86\x75\x6E\x69\x71\x75\x65\x07\xA0\x2C\x02\x08\x01\xFF\x0C\x81\x00\x00\x01\x7B\x60\x5A\x23\x7A\x01\x00\x0A\x00\x9C\x8F\x1E\xBF\x2E\x05\x59\x09" replace } err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - LCS OOM} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r SETRANGE _int 423324 1450173551
+ catch {r LCS _int _int} err
+ assert_match "*Insufficient memory*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - gcc asan reports false leak on assert} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ r config set sanitize-dump-payload no
+ catch { r restore _list 0 "\x12\x01\x02\x13\x13\x00\x00\x00\x10\x00\x00\x00\x03\x00\x00\xF3\xFE\x02\x5F\x31\x04\xF1\xFF\x0A\x00\x19\x8D\x3D\x74\x85\x94\x29\xBD" }
+ catch { r LPOP _list } err
+ assert_equal [count_log_message 0 "crashed by signal"] 0
+ assert_equal [count_log_message 0 "ASSERTION FAILED"] 1
+ }
+}
+
+test {corrupt payload: fuzzer findings - lpFind invalid access} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ r config set sanitize-dump-payload no
+ r restore _hashbig 0 "\x10\x39\x39\x00\x00\x00\x14\x00\x06\x01\x06\x01\x03\x01\x82\x5F\x33\x03\x07\x01\x82\x5F\x37\x03\x00\x01\x00\x01\x04\x01\x04\x01\x09\x01\x82\x5F\x39\x03\x05\x01\x82\x5F\x35\x03\x08\x01\x08\x01\x01\x01\x82\x5F\x31\x03\x02\x01\xF0\x01\xFF\x0A\x00\x29\xD7\xE4\x52\x79\x7A\x95\x82"
+ catch { r HLEN _hashbig }
+ catch { r HSETNX _hashbig 513072881620 "\x9A\x4B\x1F\xF2\x99\x74\x6E\x96\x84\x7F\xB9\x85\xBE\xD6\x1A\x93\x0A\xED\xAE\x19\xA0\x5A\x67\xD6\x89\xA8\xF9\xF2\xB8\xBD\x3E\x5A\xCF\xD2\x5B\x17\xA4\xBB\xB2\xA9\x56\x67\x6E\x0B\xED\xCD\x36\x49\xC6\x84\xFF\xC2\x76\x9B\xF3\x49\x88\x97\x92\xD2\x54\xE9\x08\x19\x86\x40\x96\x24\x68\x25\x9D\xF7\x0E\xB7\x36\x85\x68\x6B\x2A\x97\x64\x30\xE6\xFF\x9A\x2A\x42\x2B\x31\x01\x32\xB3\xEE\x78\x1A\x26\x94\xE2\x07\x34\x50\x8A\xFF\xF9\xAE\xEA\xEC\x59\x42\xF5\x39\x40\x65\xDE\x55\xCC\x77\x1B\x32\x02\x19\xEE\x3C\xD4\x79\x48\x01\x4F\x51\xFE\x22\xE0\x0C\xF4\x07\x06\xCD\x55\x30\xC0\x24\x32\xD4\xCC\xAF\x82\x05\x48\x14\x10\x55\xA1\x3D\xF6\x81\x45\x54\xEA\x71\x24\x27\x06\xDC\xFA\xE4\xE4\x87\xCC\x81\xA0\x47\xA5\xAF\xD1\x89\xE7\x42\xC3\x24\xD0\x32\x7A\xDE\x44\x47\x6E\x1F\xCB\xEE\xA6\x46\xDE\x0D\xE6\xD5\x16\x03\x2A\xD6\x9E\xFD\x94\x02\x2C\xDB\x1F\xD0\xBE\x98\x10\xE3\xEB\xEA\xBE\xE5\xD1" }
+ }
+}
+
+test {corrupt payload: fuzzer findings - invalid access in ziplist tail prevlen decoding} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r debug set-skip-checksum-validation 1
+ r config set sanitize-dump-payload no
+ catch {r restore _listbig 0 "\x0e\x02\x1B\x1B\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\x02\x5F\x39\x04\xF9\x02\x02\x5F\x37\x04\xF7\x02\x02\x5F\x35\xFF\x19\x19\x00\x00\x00\x16\x00\x00\x00\x05\x00\x00\xF5\x02\x02\x5F\x33\x04\xF3\x02\x02\x5F\x31\xFE\xF1\xFF\x0A\x00\x6B\x43\x32\x2F\xBB\x29\x0a\xBE"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - zset zslInsert with a NAN score} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r restore _nan_zset 0 "\x05\x0A\x02\x5F\x39\x00\x00\x00\x00\x00\x00\x22\x40\xC0\x08\x00\x00\x00\x00\x00\x00\x20\x40\x02\x5F\x37\x00\x00\x00\x00\x00\x00\x1C\x40\xC0\x06\x00\x00\x00\x00\x00\x00\x18\x40\x02\x5F\x35\x00\x00\x00\x00\x00\x00\x14\x40\xC0\x04\x00\x00\x00\x00\x00\x00\x10\x40\x02\x5F\x33\x00\x00\x00\x00\x00\x00\x08\x40\xC0\x02\x00\x00\x00\x00\x00\x00\x00\x40\x02\x5F\x31\x00\x00\x00\x00\x00\x55\xF0\x7F\xC0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0A\x00\xEC\x94\x86\xD8\xFD\x5C\x5F\xD8"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - streamLastValidID panic} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore _streambig 0 "\x13\xC0\x10\x00\x00\x01\x80\x20\x48\xA0\x33\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4F\x40\x5C\x18\x5C\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x00\x01\x20\x03\x00\x05\x20\x1C\x40\x09\x05\x01\x01\x82\x5F\x31\x03\x80\x0D\x00\x02\x20\x0D\x00\x02\xA0\x19\x00\x03\x20\x0B\x02\x82\x5F\x33\x60\x19\x40\x2F\x02\x01\x01\x04\x20\x19\x00\xFF\x10\x00\x00\x01\x80\x20\x48\xA0\x34\x00\x00\x00\x00\x00\x00\x00\x01\xC3\x40\x51\x40\x5E\x18\x5E\x00\x00\x00\x24\x00\x05\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x06\x01\x01\x82\x5F\x35\x03\x05\x20\x1E\x40\x0B\x03\x01\x01\x06\x01\x80\x0B\x00\x02\x20\x0B\x02\x82\x5F\x37\xA0\x19\x00\x03\x20\x0D\x00\x08\xA0\x19\x00\x04\x20\x0B\x02\x82\x5F\x39\x20\x19\x00\xFF\x10\x00\x00\x01\x80\x20\x48\xA0\x34\x00\x00\x00\x00\x00\x00\x00\x06\xC3\x3D\x40\x4A\x18\x4A\x00\x00\x00\x15\x00\x02\x01\x00\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x40\x10\x00\x00\x20\x01\x40\x00\x00\x05\x60\x07\x02\xDF\xFA\x02\xC0\x23\x09\x01\x01\x86\x75\x6E\x69\x71\x75\x65\x07\xA0\x2D\x02\x08\x01\xFF\x0C\x81\x00\x00\x01\x80\x20\x48\xA0\x35\x00\x81\x00\x00\x01\x80\x20\x48\xA0\x33\x00\x00\x00\x0C\x00\x0A\x00\x34\x8B\x0E\x5B\x42\xCD\xD6\x08"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - valgrind fishy value warning} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore _key 0 "\x13\x01\x10\x00\x00\x01\x81\xCC\x07\xDC\xF2\x00\x00\x00\x00\x00\x00\x00\x00\x40\x42\x42\x00\x00\x00\x18\x00\x02\x01\x01\x01\x02\x01\x84\x69\x74\x65\x6D\x05\x85\x76\x61\x6C\x75\x65\x06\x00\x01\x02\x01\x00\x01\x00\x01\x01\x01\x00\x01\x05\x01\x03\x01\x2C\x01\x00\x01\x01\x01\x82\x5F\x31\x03\x05\x01\x02\x01\x3C\x01\x00\x01\x01\x01\x02\x01\x05\x01\xFF\x02\xD0\x00\x00\x01\x81\xCC\x07\xDD\x2E\x00\x81\x00\x00\x01\x81\xCC\x07\xDC\xF2\x00\x81\x00\x00\x01\x81\xCC\x07\xDD\x1E\x00\x03\x01\x07\x6D\x79\x67\x72\x6F\x75\x70\x81\x00\x00\x01\x81\xCC\x07\xDD\x1E\x00\x02\x01\x00\x00\x01\x81\xCC\x07\xDD\x1E\x00\x00\x00\x00\x00\x00\x00\x00\x71\xDD\x07\xCC\x81\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\x58\xDD\x07\xCC\x81\x01\x00\x00\x01\x00\x00\x01\x81\xCC\x07\xDD\x1E\x00\x00\x00\x00\x00\x00\x00\x00\x0A\x00\x2F\xB0\xD1\x15\x0A\x97\x87\x6B"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - empty set listpack} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload no
+ r debug set-skip-checksum-validation 1
+ catch {r restore _key 0 "\x14\x25\x25\x00\x00\x00\x00\x00\x02\x01\x82\x5F\x37\x03\x06\x01\x82\x5F\x35\x03\x82\x5F\x33\x03\x00\x01\x82\x5F\x31\x03\x82\x5F\x39\x03\x04\xA9\x08\x01\xFF\x0B\x00\xA3\x26\x49\xB4\x86\xB0\x0F\x41"} err
+ assert_match "*Bad data format*" $err
+ r ping
+ }
+}
+
+test {corrupt payload: fuzzer findings - set with duplicate elements causes sdiff to hang} {
+ start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] {
+ r config set sanitize-dump-payload yes
+ r debug set-skip-checksum-validation 1
+ catch {r restore _key 0 "\x14\x25\x25\x00\x00\x00\x0A\x00\x06\x01\x82\x5F\x35\x03\x04\x01\x82\x5F\x31\x03\x82\x5F\x33\x03\x00\x01\x82\x5F\x39\x03\x82\x5F\x33\x03\x08\x01\x02\x01\xFF\x0B\x00\x31\xBE\x7D\x41\x01\x03\x5B\xEC" replace} err
+ assert_match "*Bad data format*" $err
+ r ping
+
+ # In the past, it generated a broken protocol and left the client hung in sdiff
+ r config set sanitize-dump-payload no
+ assert_equal {OK} [r restore _key 0 "\x14\x25\x25\x00\x00\x00\x0A\x00\x06\x01\x82\x5F\x35\x03\x04\x01\x82\x5F\x31\x03\x82\x5F\x33\x03\x00\x01\x82\x5F\x39\x03\x82\x5F\x33\x03\x08\x01\x02\x01\xFF\x0B\x00\x31\xBE\x7D\x41\x01\x03\x5B\xEC" replace]
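+ # the unsanitized listpack contains _3 twice: SCARD and SMEMBERS expose
+ # the duplicate, but SDIFF must still return a sane, deduplicated reply
+ # instead of hanging the client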
+ assert_type set _key
+ assert_encoding listpack _key
+ assert_equal 10 [r scard _key]
+ assert_equal {0 2 4 6 8 _1 _3 _3 _5 _9} [lsort [r smembers _key]]
+ assert_equal {0 2 4 6 8 _1 _3 _5 _9} [lsort [r sdiff _key]]
+ }
+} {} {logreqres:skip} ;# This test violates {"uniqueItems": true}
+
+} ;# tags
+
diff --git a/tests/integration/dismiss-mem.tcl b/tests/integration/dismiss-mem.tcl
new file mode 100644
index 0000000..87f6e1d
--- /dev/null
+++ b/tests/integration/dismiss-mem.tcl
@@ -0,0 +1,101 @@
+# The tests in this file aim to get coverage of all the "dismiss" methods,
+# which dismiss all data-type memory in the fork child, as well as the
+# client query buffer, client output buffer and replication backlog.
+# There aren't many asserts in these tests, since we mostly just check for
+# crashes and dump file inconsistencies.
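+# (On Linux the fork child dismisses such memory with madvise(MADV_DONTNEED),
+# releasing untouched pages and keeping copy-on-write usage down.)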
+
+start_server {tags {"dismiss external:skip"}} {
+ # Other tests do exercise the child process dumping the RDB file, but the
+ # memory allocations of their keys/values are usually small and couldn't
+ # cover the "dismiss" object methods. In this test we create big keys/values
+ # to satisfy the conditions for releasing memory pages; note that in some
+ # cases we assume the OS page size is 4KB.
+ test {dismiss all data types memory} {
+ set bigstr [string repeat A 8192]
+ set 64bytes [string repeat A 64]
+
+ # string
+ populate 100 bigstring 8192
+
+ # list
+ r lpush biglist1 $bigstr ; # uncompressed ziplist node
+ r config set list-compress-depth 1 ; # compressed ziplist nodes
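+ # with a compress depth of 1 every node except the head and tail gets
+ # compressed, so the pushes below also produce compressed nodes to dismiss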
+ for {set i 0} {$i < 16} {incr i} {
+ r lpush biglist2 $bigstr
+ }
+
+ # set
+ r sadd bigset1 $bigstr ; # hashtable encoding
+ set biginteger [string repeat 1 19] ; # 19 digits still fit in an int64
+ for {set i 0} {$i < 512} {incr i} {
+ # 512 distinct int64 members make the intset span a full 4KB page
+ r sadd bigset2 [expr {$biginteger + $i}] ; # intset encoding
+ }
+
+ # zset
+ r zadd bigzset1 1.0 $bigstr ; # skiplist encoding
+ for {set i 0} {$i < 128} {incr i} {
+ # 128 distinct 64-byte members keep the zset large but still ziplist encoded
+ r zadd bigzset2 1.0 [format %064d $i] ; # ziplist encoding
+ }
+
+ # hash
+ r hset bighash1 field1 $bigstr ; # hashtable encoding
+ for {set i 0} {$i < 128} {incr i} {
+ r hset bighash2 $i $64bytes ; # ziplist encoding
+ }
+
+ # stream
+ r xadd bigstream * entry1 $bigstr entry2 $bigstr
+
+ set digest [debug_digest]
+ r config set aof-use-rdb-preamble no
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ }
+
+ test {dismiss client output buffer} {
+ # Big output buffer
+ set item [string repeat "x" 100000]
+ for {set i 0} {$i < 100} {incr i} {
+ r lpush mylist $item
+ }
+ set rd [redis_deferring_client]
+ $rd lrange mylist 0 -1
+ $rd flush
+ after 100
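+ # the ~10MB LRANGE reply (100 items of 100KB) is now pending in the
+ # client's output buffer, giving the bgsave child a big buffer to dismiss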
+
+ r bgsave
+ waitForBgsave r
+ assert_equal $item [r lpop mylist]
+ }
+
+ test {dismiss client query buffer} {
+ # Big pending query buffer
+ set bigstr [string repeat A 8192]
+ set rd [redis_deferring_client]
+ $rd write "*2\r\n\$8192\r\n"
+ $rd write $bigstr\r\n
+ $rd flush
+ after 100
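+ # only the first bulk of a 2-argument command was sent, so these bytes
+ # stay parked in the client's query buffer while the bgsave child runs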
+
+ r bgsave
+ waitForBgsave r
+ }
+
+ test {dismiss replication backlog} {
+ set master [srv 0 client]
+ start_server {} {
+ r slaveof [srv -1 host] [srv -1 port]
+ wait_for_sync r
+
+ set bigstr [string repeat A 8192]
+ for {set i 0} {$i < 20} {incr i} {
+ $master set $i $bigstr
+ }
+ $master bgsave
+ waitForBgsave $master
+ }
+ }
+}
diff --git a/tests/integration/failover.tcl b/tests/integration/failover.tcl
new file mode 100644
index 0000000..21fa3d2
--- /dev/null
+++ b/tests/integration/failover.tcl
@@ -0,0 +1,294 @@
+start_server {tags {"failover external:skip"} overrides {save {}}} {
+start_server {overrides {save {}}} {
+start_server {overrides {save {}}} {
+ set node_0 [srv 0 client]
+ set node_0_host [srv 0 host]
+ set node_0_port [srv 0 port]
+ set node_0_pid [srv 0 pid]
+
+ set node_1 [srv -1 client]
+ set node_1_host [srv -1 host]
+ set node_1_port [srv -1 port]
+ set node_1_pid [srv -1 pid]
+
+ set node_2 [srv -2 client]
+ set node_2_host [srv -2 host]
+ set node_2_port [srv -2 port]
+ set node_2_pid [srv -2 pid]
+
+ proc assert_digests_match {n1 n2 n3} {
+ assert_equal [$n1 debug digest] [$n2 debug digest]
+ assert_equal [$n2 debug digest] [$n3 debug digest]
+ }
+
+ test {failover command fails without connected replica} {
+ catch { $node_0 failover to $node_1_host $node_1_port } err
+ if {! [string match "ERR*" $err]} {
+ fail "failover command succeeded when replica not connected"
+ }
+ }
+
+ test {setup replication for following tests} {
+ $node_1 replicaof $node_0_host $node_0_port
+ $node_2 replicaof $node_0_host $node_0_port
+ wait_for_sync $node_1
+ wait_for_sync $node_2
+ }
+
+ test {failover command fails with invalid host} {
+ catch { $node_0 failover to invalidhost $node_1_port } err
+ assert_match "ERR*" $err
+ }
+
+ test {failover command fails with invalid port} {
+ catch { $node_0 failover to $node_1_host invalidport } err
+ assert_match "ERR*" $err
+ }
+
+ test {failover command fails with just force and timeout} {
+ catch { $node_0 FAILOVER FORCE TIMEOUT 100} err
+ assert_match "ERR*" $err
+ }
+
+ test {failover command fails when sent to a replica} {
+ catch { $node_1 failover to $node_1_host $node_1_port } err
+ assert_match "ERR*" $err
+ }
+
+ test {failover command fails with force without timeout} {
+ catch { $node_0 failover to $node_1_host $node_1_port FORCE } err
+ assert_match "ERR*" $err
+ }
+
+ test {failover command to specific replica works} {
+ set initial_psyncs [s -1 sync_partial_ok]
+ set initial_syncs [s -1 sync_full]
+
+ # Generate a delta between primary and replica
+ set load_handler [start_write_load $node_0_host $node_0_port 5]
+ pause_process [srv -1 pid]
+ wait_for_condition 50 100 {
+ [s 0 total_commands_processed] > 100
+ } else {
+ fail "Node 0 did not accept writes"
+ }
+ resume_process [srv -1 pid]
+
+ # Execute the failover
+ $node_0 failover to $node_1_host $node_1_port
+
+ # Wait for failover to end
+ wait_for_condition 50 100 {
+ [s 0 master_failover_state] == "no-failover"
+ } else {
+ fail "Failover from node 0 to node 1 did not finish"
+ }
+
+ # stop the write load and make sure no more commands processed
+ stop_write_load $load_handler
+ wait_load_handlers_disconnected
+
+ $node_2 replicaof $node_1_host $node_1_port
+ wait_for_sync $node_0
+ wait_for_sync $node_2
+
+ assert_match *slave* [$node_0 role]
+ assert_match *master* [$node_1 role]
+ assert_match *slave* [$node_2 role]
+
+ # We should accept psyncs from both nodes
+ assert_equal [expr [s -1 sync_partial_ok] - $initial_psyncs] 2
+ assert_equal [expr [s -1 sync_full] - $initial_syncs] 0
+ assert_digests_match $node_0 $node_1 $node_2
+ }
+
+ test {failover command to any replica works} {
+ set initial_psyncs [s -2 sync_partial_ok]
+ set initial_syncs [s -2 sync_full]
+
+ wait_for_ofs_sync $node_1 $node_2
+ # We stop node 0 and make sure node 2 is selected
+ pause_process $node_0_pid
+ $node_1 set CASE 1
+ $node_1 FAILOVER
+
+ # Wait for failover to end
+ wait_for_condition 50 100 {
+ [s -1 master_failover_state] == "no-failover"
+ } else {
+ fail "Failover from node 1 to node 2 did not finish"
+ }
+ resume_process $node_0_pid
+ $node_0 replicaof $node_2_host $node_2_port
+
+ wait_for_sync $node_0
+ wait_for_sync $node_1
+
+ assert_match *slave* [$node_0 role]
+ assert_match *slave* [$node_1 role]
+ assert_match *master* [$node_2 role]
+
+ # We should accept psyncs from both nodes
+ assert_equal [expr [s -2 sync_partial_ok] - $initial_psyncs] 2
+ assert_equal [expr [s -2 sync_full] - $initial_syncs] 0
+ assert_digests_match $node_0 $node_1 $node_2
+ }
+
+ test {failover to a replica with force works} {
+ set initial_psyncs [s 0 sync_partial_ok]
+ set initial_syncs [s 0 sync_full]
+
+ pause_process $node_0_pid
+ # node 0 will never acknowledge this write
+ $node_2 set case 2
+ $node_2 failover to $node_0_host $node_0_port TIMEOUT 100 FORCE
+
+ # Wait for node 2 to give up waiting for node 0 to catch up, and force the failover
+ wait_for_condition 50 100 {
+ [s -2 master_failover_state] == "failover-in-progress"
+ } else {
+ fail "Failover from node 2 to node 0 did not timeout"
+ }
+
+ # Quick check that everyone is a replica; we never want a
+ # state where there are two masters.
+ assert_match *slave* [$node_1 role]
+ assert_match *slave* [$node_2 role]
+
+ resume_process $node_0_pid
+
+ # Wait for failover to end
+ wait_for_condition 50 100 {
+ [s -2 master_failover_state] == "no-failover"
+ } else {
+ fail "Failover from node 2 to node 0 did not finish"
+ }
+ $node_1 replicaof $node_0_host $node_0_port
+
+ wait_for_sync $node_1
+ wait_for_sync $node_2
+
+ assert_match *master* [$node_0 role]
+ assert_match *slave* [$node_1 role]
+ assert_match *slave* [$node_2 role]
+
+ assert_equal [count_log_message -2 "time out exceeded, failing over."] 1
+
+ # We should accept both psyncs, even though this is the one case where we
+ # might not, since the replica never caught up.
+ assert_equal [expr [s 0 sync_partial_ok] - $initial_psyncs] 2
+ assert_equal [expr [s 0 sync_full] - $initial_syncs] 0
+ assert_digests_match $node_0 $node_1 $node_2
+ }
+
+ test {failover with timeout aborts if replica never catches up} {
+ set initial_psyncs [s 0 sync_partial_ok]
+ set initial_syncs [s 0 sync_full]
+
+ # Stop replica so it never catches up
+ pause_process [srv -1 pid]
+ $node_0 SET CASE 1
+
+ $node_0 failover to [srv -1 host] [srv -1 port] TIMEOUT 500
+ # Wait for failover to end
+ wait_for_condition 50 20 {
+ [s 0 master_failover_state] == "no-failover"
+ } else {
+ fail "Failover from node_0 to replica did not finish"
+ }
+
+ resume_process [srv -1 pid]
+
+ # We need to make sure the nodes actually sync back up
+ wait_for_ofs_sync $node_0 $node_1
+ wait_for_ofs_sync $node_0 $node_2
+
+ assert_match *master* [$node_0 role]
+ assert_match *slave* [$node_1 role]
+ assert_match *slave* [$node_2 role]
+
+ # Since we never caught up, there should be no syncs
+ assert_equal [expr [s 0 sync_partial_ok] - $initial_psyncs] 0
+ assert_equal [expr [s 0 sync_full] - $initial_syncs] 0
+ assert_digests_match $node_0 $node_1 $node_2
+ }
+
+ test {failovers can be aborted} {
+ set initial_psyncs [s 0 sync_partial_ok]
+ set initial_syncs [s 0 sync_full]
+
+ # Stop replica so it never catches up
+ pause_process [srv -1 pid]
+ $node_0 SET CASE 2
+
+ $node_0 failover to [srv -1 host] [srv -1 port] TIMEOUT 60000
+ assert_match [s 0 master_failover_state] "waiting-for-sync"
+
+ # Sanity check that read commands are still accepted
+ $node_0 GET CASE
+
+ $node_0 failover abort
+ assert_match [s 0 master_failover_state] "no-failover"
+
+ resume_process [srv -1 pid]
+
+ # Just make sure everything is still synced
+ wait_for_ofs_sync $node_0 $node_1
+ wait_for_ofs_sync $node_0 $node_2
+
+ assert_match *master* [$node_0 role]
+ assert_match *slave* [$node_1 role]
+ assert_match *slave* [$node_2 role]
+
+ # Since we never caught up, there should be no syncs
+ assert_equal [expr [s 0 sync_partial_ok] - $initial_psyncs] 0
+ assert_equal [expr [s 0 sync_full] - $initial_syncs] 0
+ assert_digests_match $node_0 $node_1 $node_2
+ }
+
+ test {failover aborts if target rejects sync request} {
+ set initial_psyncs [s 0 sync_partial_ok]
+ set initial_syncs [s 0 sync_full]
+
+ # We block psync, so the failover will fail
+ $node_1 acl setuser default -psync
+
+ # We pause the target long enough to send a write command
+ # during the pause. This write will not be interrupted.
+ pause_process [srv -1 pid]
+ set rd [redis_deferring_client]
+ $rd SET FOO BAR
+ $node_0 failover to $node_1_host $node_1_port
+ resume_process [srv -1 pid]
+
+ # Wait for failover to end
+ wait_for_condition 50 100 {
+ [s 0 master_failover_state] == "no-failover"
+ } else {
+ fail "Failover from node_0 to replica did not finish"
+ }
+
+ assert_equal [$rd read] "OK"
+ $rd close
+
+ # restore access to psync
+ $node_1 acl setuser default +psync
+
+ # We need to make sure the nodes actually sync back up
+ wait_for_sync $node_1
+ wait_for_sync $node_2
+
+ assert_match *master* [$node_0 role]
+ assert_match *slave* [$node_1 role]
+ assert_match *slave* [$node_2 role]
+
+ # We will cycle all of our replicas here and force a psync.
+ assert_equal [expr [s 0 sync_partial_ok] - $initial_psyncs] 2
+ assert_equal [expr [s 0 sync_full] - $initial_syncs] 0
+
+ assert_equal [count_log_message 0 "Failover target rejected psync request"] 1
+ assert_digests_match $node_0 $node_1 $node_2
+ }
+}
+}
+}
diff --git a/tests/integration/logging.tcl b/tests/integration/logging.tcl
new file mode 100644
index 0000000..4f8639b
--- /dev/null
+++ b/tests/integration/logging.tcl
@@ -0,0 +1,61 @@
+tags {"external:skip"} {
+
+set system_name [string tolower [exec uname -s]]
+set backtrace_supported 0
+
+# We only support darwin or Linux with glibc
+if {$system_name eq {darwin}} {
+ set backtrace_supported 1
+} elseif {$system_name eq {linux}} {
+ # Avoid the test on musl libc, which does not support backtrace,
+ # and on static binaries (ldd exit code 1) where we can't detect musl
+ catch {
+ set ldd [exec ldd src/redis-server]
+ if {![string match {*libc.*musl*} $ldd]} {
+ set backtrace_supported 1
+ }
+ }
+}
+
+if {$backtrace_supported} {
+ set server_path [tmpdir server.log]
+ start_server [list overrides [list dir $server_path]] {
+ test "Server is able to generate a stack trace on selected systems" {
+ r config set watchdog-period 200
+ r debug sleep 1
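+ # DEBUG SLEEP blocks the main thread for 1s while the watchdog period is
+ # only 200ms, so the watchdog fires and logs a trace through debugCommand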
+ set pattern "*debugCommand*"
+ set res [wait_for_log_messages 0 \"$pattern\" 0 100 100]
+ if {$::verbose} { puts $res }
+ }
+ }
+}
+
+# Valgrind will complain that the process was terminated by a signal; skip these tests.
+if {!$::valgrind} {
+ if {$backtrace_supported} {
+ set crash_pattern "*STACK TRACE*"
+ } else {
+ set crash_pattern "*crashed by signal*"
+ }
+
+ set server_path [tmpdir server1.log]
+ start_server [list overrides [list dir $server_path crash-memcheck-enabled no]] {
+ test "Crash report generated on SIGABRT" {
+ set pid [s process_id]
+ exec kill -SIGABRT $pid
+ set res [wait_for_log_messages 0 \"$crash_pattern\" 0 50 100]
+ if {$::verbose} { puts $res }
+ }
+ }
+
+ set server_path [tmpdir server2.log]
+ start_server [list overrides [list dir $server_path crash-memcheck-enabled no]] {
+ test "Crash report generated on DEBUG SEGFAULT" {
+ catch {r debug segfault}
+ set res [wait_for_log_messages 0 \"$crash_pattern\" 0 50 100]
+ if {$::verbose} { puts $res }
+ }
+ }
+}
+
+}
diff --git a/tests/integration/psync2-master-restart.tcl b/tests/integration/psync2-master-restart.tcl
new file mode 100644
index 0000000..a9e21d1
--- /dev/null
+++ b/tests/integration/psync2-master-restart.tcl
@@ -0,0 +1,218 @@
+start_server {tags {"psync2 external:skip"}} {
+start_server {} {
+start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ set replica [srv -1 client]
+ set replica_host [srv -1 host]
+ set replica_port [srv -1 port]
+
+ set sub_replica [srv -2 client]
+
+ # Make sure the server saves an RDB on shutdown
+ $master config set save "3600 1"
+
+ # Because we will test partial resync later, we don't want a timeout to
+ # cause a master-replica disconnect; the extra reconnections would break
+ # the sync_partial_ok stat test
+ $master config set repl-timeout 3600
+ $replica config set repl-timeout 3600
+ $sub_replica config set repl-timeout 3600
+
+ # Avoid PINGs
+ $master config set repl-ping-replica-period 3600
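+ # (a PING would advance master_repl_offset in the background and break
+ # the exact offset comparisons below)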
+ $master config rewrite
+
+ # Build replication chain
+ $replica replicaof $master_host $master_port
+ $sub_replica replicaof $replica_host $replica_port
+
+ wait_for_condition 50 100 {
+ [status $replica master_link_status] eq {up} &&
+ [status $sub_replica master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ test "PSYNC2: Partial resync after Master restart using RDB aux fields when offset is 0" {
+ assert {[status $master master_repl_offset] == 0}
+
+ set replid [status $master master_replid]
+ $replica config resetstat
+
+ catch {
+ restart_server 0 true false true now
+ set master [srv 0 client]
+ }
+ wait_for_condition 50 1000 {
+ [status $replica master_link_status] eq {up} &&
+ [status $sub_replica master_link_status] eq {up}
+ } else {
+ fail "Replicas didn't sync after master restart"
+ }
+
+ # Make sure the master restored the replication info correctly
+ assert {[status $master master_replid] != $replid}
+ assert {[status $master master_repl_offset] == 0}
+ assert {[status $master master_replid2] eq $replid}
+ assert {[status $master second_repl_offset] == 1}
+
+ # Make sure the master set up the replication backlog correctly
+ assert {[status $master repl_backlog_active] == 1}
+ assert {[status $master repl_backlog_first_byte_offset] == 1}
+ assert {[status $master repl_backlog_histlen] == 0}
+
+ # Partial resync after Master restart
+ assert {[status $master sync_partial_ok] == 1}
+ assert {[status $replica sync_partial_ok] == 1}
+ }
+
+ # Generate some data
+ createComplexDataset $master 1000
+
+ test "PSYNC2: Partial resync after Master restart using RDB aux fields with data" {
+ wait_for_condition 500 100 {
+ [status $master master_repl_offset] == [status $replica master_repl_offset] &&
+ [status $master master_repl_offset] == [status $sub_replica master_repl_offset]
+ } else {
+ fail "Replicas and master offsets were unable to match *exactly*."
+ }
+
+ set replid [status $master master_replid]
+ set offset [status $master master_repl_offset]
+ $replica config resetstat
+
+ catch {
+ # SHUTDOWN NOW ensures master doesn't send GETACK to replicas before
+ # shutting down which would affect the replication offset.
+ restart_server 0 true false true now
+ set master [srv 0 client]
+ }
+ wait_for_condition 50 1000 {
+ [status $replica master_link_status] eq {up} &&
+ [status $sub_replica master_link_status] eq {up}
+ } else {
+ fail "Replicas didn't sync after master restart"
+ }
+
+ # Make sure the master restored the replication info correctly
+ assert {[status $master master_replid] != $replid}
+ assert {[status $master master_repl_offset] == $offset}
+ assert {[status $master master_replid2] eq $replid}
+ assert {[status $master second_repl_offset] == [expr $offset+1]}
+
+ # Make sure the master set up the replication backlog correctly
+ assert {[status $master repl_backlog_active] == 1}
+ assert {[status $master repl_backlog_first_byte_offset] == [expr $offset+1]}
+ assert {[status $master repl_backlog_histlen] == 0}
+
+ # Partial resync after Master restart
+ assert {[status $master sync_partial_ok] == 1}
+ assert {[status $replica sync_partial_ok] == 1}
+ }
+
+ test "PSYNC2: Partial resync after Master restart using RDB aux fields with expire" {
+ $master debug set-active-expire 0
+ for {set j 0} {$j < 1024} {incr j} {
+ $master select [expr $j%16]
+ $master set $j somevalue px 10
+ }
+
+ after 20
+
+ # Wait until the master has received the ACK from the replica. If the
+ # master thinks that any replica is lagging when it shuts down, it would
+ # send GETACK to the replicas, affecting the replication offset.
+ set offset [status $master master_repl_offset]
+ wait_for_condition 500 100 {
+ [string match "*slave0:*,offset=$offset,*" [$master info replication]] &&
+ $offset == [status $replica master_repl_offset] &&
+ $offset == [status $sub_replica master_repl_offset]
+ } else {
+ show_cluster_status
+ fail "Replicas and master offsets were unable to match *exactly*."
+ }
+
+ set offset [status $master master_repl_offset]
+ $replica config resetstat
+
+ catch {
+ # Unlike the test above, here we use SIGTERM, which behaves
+ # differently compared to SHUTDOWN NOW if there are lagging
+ # replicas. This is just to increase coverage and let each test use
+ # a different shutdown approach. In this case there are no lagging
+ # replicas though.
+ restart_server 0 true false
+ set master [srv 0 client]
+ }
+ wait_for_condition 50 1000 {
+ [status $replica master_link_status] eq {up} &&
+ [status $sub_replica master_link_status] eq {up}
+ } else {
+ fail "Replicas didn't sync after master restart"
+ }
+
+ set expired_offset [status $master repl_backlog_histlen]
+ # Stale keys were expired while loading, and master_repl_offset grew accordingly
+ assert {[status $master rdb_last_load_keys_expired] == 1024}
+ assert {[status $master master_repl_offset] == [expr $offset+$expired_offset]}
+
+ # Partial resync after Master restart
+ assert {[status $master sync_partial_ok] == 1}
+ assert {[status $replica sync_partial_ok] == 1}
+
+ set digest [$master debug digest]
+ assert {$digest eq [$replica debug digest]}
+ assert {$digest eq [$sub_replica debug digest]}
+ }
+
+ test "PSYNC2: Full resync after Master restart when too many key expired" {
+ $master config set repl-backlog-size 16384
+ $master config rewrite
+
+ $master debug set-active-expire 0
+ # Make sure replication backlog is full and will be trimmed.
+ for {set j 0} {$j < 2048} {incr j} {
+ $master select [expr $j%16]
+ $master set $j somevalue px 10
+ }
+
+ after 20
+
+ wait_for_condition 500 100 {
+ [status $master master_repl_offset] == [status $replica master_repl_offset] &&
+ [status $master master_repl_offset] == [status $sub_replica master_repl_offset]
+ } else {
+ fail "Replicas and master offsets were unable to match *exactly*."
+ }
+
+ $replica config resetstat
+
+ catch {
+ # Unlike the test above, here we use SIGTERM. This is just to
+ # increase coverage and let each test use a different shutdown
+ # approach.
+ restart_server 0 true false
+ set master [srv 0 client]
+ }
+ wait_for_condition 50 1000 {
+ [status $replica master_link_status] eq {up} &&
+ [status $sub_replica master_link_status] eq {up}
+ } else {
+ fail "Replicas didn't sync after master restart"
+ }
+
+ # Replication backlog is full
+ assert {[status $master repl_backlog_first_byte_offset] > [status $master second_repl_offset]}
+ assert {[status $master sync_partial_ok] == 0}
+ assert {[status $master sync_full] == 1}
+ assert {[status $master rdb_last_load_keys_expired] == 2048}
+ assert {[status $replica sync_full] == 1}
+
+ set digest [$master debug digest]
+ assert {$digest eq [$replica debug digest]}
+ assert {$digest eq [$sub_replica debug digest]}
+ }
+}}}
diff --git a/tests/integration/psync2-pingoff.tcl b/tests/integration/psync2-pingoff.tcl
new file mode 100644
index 0000000..3589d07
--- /dev/null
+++ b/tests/integration/psync2-pingoff.tcl
@@ -0,0 +1,250 @@
+# These tests were added together with the meaningful offset implementation
+# in redis 6.0.0, which was later abandoned in 6.0.4. They used to verify that
+# servers are able to PSYNC with replicas even if the replication stream ends
+# with PINGs that are present on one server and missing on another.
+# We keep these tests because they reproduce edge cases in the replication
+# logic, in the hope that they'll spot some problem in the future.
+
+start_server {tags {"psync2 external:skip"}} {
+start_server {} {
+ # Config
+ set debug_msg 0 ; # Enable additional debug messages
+
+ for {set j 0} {$j < 2} {incr j} {
+ set R($j) [srv [expr 0-$j] client]
+ set R_host($j) [srv [expr 0-$j] host]
+ set R_port($j) [srv [expr 0-$j] port]
+ $R($j) CONFIG SET repl-ping-replica-period 1
+ if {$debug_msg} {puts "Log file: [srv [expr 0-$j] stdout]"}
+ }
+
+ # Setup replication
+ test "PSYNC2 pingoff: setup" {
+ $R(1) replicaof $R_host(0) $R_port(0)
+ $R(0) set foo bar
+ wait_for_condition 50 1000 {
+ [status $R(1) master_link_status] == "up" &&
+ [$R(0) dbsize] == 1 && [$R(1) dbsize] == 1
+ } else {
+ fail "Replicas not replicating from master"
+ }
+ }
+
+ test "PSYNC2 pingoff: write and wait replication" {
+ $R(0) INCR counter
+ $R(0) INCR counter
+ $R(0) INCR counter
+ wait_for_condition 50 1000 {
+ [$R(0) GET counter] eq [$R(1) GET counter]
+ } else {
+ fail "Master and replica don't agree about counter"
+ }
+ }
+
+ # In this test we'll make sure the replica will get stuck, but with
+ # an active connection: this way the master will continue to send PINGs
+ # every second (we modified the PING period earlier)
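+ # The MULTI block below queues DEBUG SLEEP together with SLAVEOF NO ONE,
+ # so the replica blocks for 5 seconds while PINGs pile up on its socket,
+ # and only then promotes itself.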
+ test "PSYNC2 pingoff: pause replica and promote it" {
+ $R(1) MULTI
+ $R(1) DEBUG SLEEP 5
+ $R(1) SLAVEOF NO ONE
+ $R(1) EXEC
+ $R(1) ping ; # Wait for it to become available again
+ }
+
+ test "Make the old master a replica of the new one and check conditions" {
+ # We set the new master's ping period to a high value, so that there's
+ # no chance for a race condition of sending a PING in between the two
+ # INFO calls in the assert for master_repl_offset match below.
+ $R(1) CONFIG SET repl-ping-replica-period 1000
+
+ assert_equal [status $R(1) sync_full] 0
+ $R(0) REPLICAOF $R_host(1) $R_port(1)
+
+ wait_for_condition 50 1000 {
+ [status $R(0) master_link_status] == "up"
+ } else {
+ fail "The new master was not able to sync"
+ }
+
+ # make sure replication is still alive and kicking
+ $R(1) incr x
+ wait_for_condition 50 1000 {
+ [status $R(0) loading] == 0 &&
+ [$R(0) get x] == 1
+ } else {
+ fail "replica didn't get incr"
+ }
+ assert_equal [status $R(0) master_repl_offset] [status $R(1) master_repl_offset]
+ }
+}}
+
+
+start_server {tags {"psync2 external:skip"}} {
+start_server {} {
+start_server {} {
+start_server {} {
+start_server {} {
+ test {test various edge cases of repl topology changes with missing pings at the end} {
+ set master [srv -4 client]
+ set master_host [srv -4 host]
+ set master_port [srv -4 port]
+ set replica1 [srv -3 client]
+ set replica2 [srv -2 client]
+ set replica3 [srv -1 client]
+ set replica4 [srv -0 client]
+
+ $replica1 replicaof $master_host $master_port
+ $replica2 replicaof $master_host $master_port
+ $replica3 replicaof $master_host $master_port
+ $replica4 replicaof $master_host $master_port
+ wait_for_condition 50 1000 {
+ [status $master connected_slaves] == 4
+ } else {
+ fail "replicas didn't connect"
+ }
+
+ $master incr x
+ wait_for_condition 50 1000 {
+ [$replica1 get x] == 1 && [$replica2 get x] == 1 &&
+ [$replica3 get x] == 1 && [$replica4 get x] == 1
+ } else {
+ fail "replicas didn't get incr"
+ }
+
+ # disconnect replica1 and replica2
+ # and wait for the master to send a ping to replica3 and replica4
+ $replica1 replicaof no one
+ $replica2 replicaof 127.0.0.1 1 ;# we can't promote it to master since that will cycle the replication id
+ $master config set repl-ping-replica-period 1
+ set replofs [status $master master_repl_offset]
+ wait_for_condition 50 100 {
+ [status $replica3 master_repl_offset] > $replofs &&
+ [status $replica4 master_repl_offset] > $replofs
+ } else {
+ fail "replica didn't sync in time"
+ }
+
+ # make everyone sync from the replica1 that didn't get the last ping from the old master
+ # replica4 will keep syncing from the old master which now syncs from replica1
+ # and replica2 will re-connect to the old master (which went back in time)
+ set new_master_host [srv -3 host]
+ set new_master_port [srv -3 port]
+ $replica3 replicaof $new_master_host $new_master_port
+ $master replicaof $new_master_host $new_master_port
+ $replica2 replicaof $master_host $master_port
+ wait_for_condition 50 1000 {
+ [status $replica2 master_link_status] == "up" &&
+ [status $replica3 master_link_status] == "up" &&
+ [status $replica4 master_link_status] == "up" &&
+ [status $master master_link_status] == "up"
+ } else {
+ fail "replicas didn't connect"
+ }
+
+ # make sure replication is still alive and kicking
+ $replica1 incr x
+ wait_for_condition 50 1000 {
+ [$replica2 get x] == 2 &&
+ [$replica3 get x] == 2 &&
+ [$replica4 get x] == 2 &&
+ [$master get x] == 2
+ } else {
+ fail "replicas didn't get incr"
+ }
+
+ # make sure we have the right amount of full syncs
+ assert_equal [status $master sync_full] 6
+ assert_equal [status $replica1 sync_full] 2
+ assert_equal [status $replica2 sync_full] 0
+ assert_equal [status $replica3 sync_full] 0
+ assert_equal [status $replica4 sync_full] 0
+
+ # force psync
+ $master client kill type master
+ $replica2 client kill type master
+ $replica3 client kill type master
+ $replica4 client kill type master
+
+ # make sure replication is still alive and kicking
+ $replica1 incr x
+ wait_for_condition 50 1000 {
+ [$replica2 get x] == 3 &&
+ [$replica3 get x] == 3 &&
+ [$replica4 get x] == 3 &&
+ [$master get x] == 3
+ } else {
+ fail "replicas didn't get incr"
+ }
+
+ # make sure we have the right amount of full syncs
+ assert_equal [status $master sync_full] 6
+ assert_equal [status $replica1 sync_full] 2
+ assert_equal [status $replica2 sync_full] 0
+ assert_equal [status $replica3 sync_full] 0
+ assert_equal [status $replica4 sync_full] 0
+}
+}}}}}
+
+start_server {tags {"psync2 external:skip"}} {
+start_server {} {
+start_server {} {
+
+ for {set j 0} {$j < 3} {incr j} {
+ set R($j) [srv [expr 0-$j] client]
+ set R_host($j) [srv [expr 0-$j] host]
+ set R_port($j) [srv [expr 0-$j] port]
+ $R($j) CONFIG SET repl-ping-replica-period 1
+ }
+
+ test "Chained replicas disconnect when replica re-connect with the same master" {
+ # Add a second replica as a chained replica of the current replica
+ $R(1) replicaof $R_host(0) $R_port(0)
+ $R(2) replicaof $R_host(1) $R_port(1)
+ wait_for_condition 50 1000 {
+ [status $R(2) master_link_status] == "up"
+ } else {
+ fail "Chained replica not replicating from its master"
+ }
+
+ # Do a write on the master, and wait for the master to
+ # send some PINGs to its replica
+ $R(0) INCR counter2
+ set replofs [status $R(0) master_repl_offset]
+ wait_for_condition 50 100 {
+ [status $R(1) master_repl_offset] > $replofs &&
+ [status $R(2) master_repl_offset] > $replofs
+ } else {
+ fail "replica didn't sync in time"
+ }
+ set sync_partial_master [status $R(0) sync_partial_ok]
+ set sync_partial_replica [status $R(1) sync_partial_ok]
+ $R(0) CONFIG SET repl-ping-replica-period 100
+
+ # Disconnect the master's direct replica
+ $R(0) client kill type replica
+ wait_for_condition 50 1000 {
+ [status $R(1) master_link_status] == "up" &&
+ [status $R(2) master_link_status] == "up" &&
+ [status $R(0) sync_partial_ok] == $sync_partial_master + 1 &&
+ [status $R(1) sync_partial_ok] == $sync_partial_replica
+ } else {
+ fail "Disconnected replica failed to PSYNC with master"
+ }
+
+ # Verify that the replica's and its sub-replica's meaningful and real
+ # offsets match the master's
+ assert_equal [status $R(0) master_repl_offset] [status $R(1) master_repl_offset]
+ assert_equal [status $R(0) master_repl_offset] [status $R(2) master_repl_offset]
+
+ # make sure replication is still alive and kicking
+ $R(0) incr counter2
+ wait_for_condition 50 1000 {
+ [$R(1) get counter2] == 2 && [$R(2) get counter2] == 2
+ } else {
+ fail "replicas didn't get incr"
+ }
+ assert_equal [status $R(0) master_repl_offset] [status $R(1) master_repl_offset]
+ assert_equal [status $R(0) master_repl_offset] [status $R(2) master_repl_offset]
+ }
+}}}
diff --git a/tests/integration/psync2-reg.tcl b/tests/integration/psync2-reg.tcl
new file mode 100644
index 0000000..b8dd101
--- /dev/null
+++ b/tests/integration/psync2-reg.tcl
@@ -0,0 +1,82 @@
+# Issue 3899 regression test.
+# We create a chain of three instances: master -> slave -> slave2
+# and continuously break the link while traffic is generated by
+# redis-benchmark. At the end we check that the data is the same
+# everywhere.
+
+start_server {tags {"psync2 external:skip"}} {
+start_server {} {
+start_server {} {
+ # Config
+ set debug_msg 0 ; # Enable additional debug messages
+
+ set no_exit 0 ; # Do not exit at end of the test
+
+ set duration 20 ; # Total test seconds
+
+ for {set j 0} {$j < 3} {incr j} {
+ set R($j) [srv [expr 0-$j] client]
+ set R_host($j) [srv [expr 0-$j] host]
+ set R_port($j) [srv [expr 0-$j] port]
+ set R_unixsocket($j) [srv [expr 0-$j] unixsocket]
+ if {$debug_msg} {puts "Log file: [srv [expr 0-$j] stdout]"}
+ }
+
+ # Setup the replication and backlog parameters
+ test "PSYNC2 #3899 regression: setup" {
+ $R(1) slaveof $R_host(0) $R_port(0)
+ $R(2) slaveof $R_host(0) $R_port(0)
+ $R(0) set foo bar
+ wait_for_condition 50 1000 {
+ [status $R(1) master_link_status] == "up" &&
+ [status $R(2) master_link_status] == "up" &&
+ [$R(1) dbsize] == 1 &&
+ [$R(2) dbsize] == 1
+ } else {
+ fail "Replicas not replicating from master"
+ }
+ $R(0) config set repl-backlog-size 10mb
+ $R(1) config set repl-backlog-size 10mb
+ }
+
+ set cycle_start_time [clock milliseconds]
+ set bench_pid [exec src/redis-benchmark -s $R_unixsocket(0) -n 10000000 -r 1000 incr __rand_int__ > /dev/null &]
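+ # the benchmark INCRs keys from a 1000-key space in the background while
+ # the loop below randomly kills the replication links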
+ while 1 {
+ set elapsed [expr {[clock milliseconds]-$cycle_start_time}]
+ if {$elapsed > $duration*1000} break
+ if {rand() < .05} {
+ test "PSYNC2 #3899 regression: kill first replica" {
+ $R(1) client kill type master
+ }
+ }
+ if {rand() < .05} {
+ test "PSYNC2 #3899 regression: kill chained replica" {
+ $R(2) client kill type master
+ }
+ }
+ after 100
+ }
+ exec kill -9 $bench_pid
+
+ if {$debug_msg} {
+ for {set j 0} {$j < 100} {incr j} {
+ if {
+ [$R(0) debug digest] == [$R(1) debug digest] &&
+ [$R(1) debug digest] == [$R(2) debug digest]
+ } break
+ puts [$R(0) debug digest]
+ puts [$R(1) debug digest]
+ puts [$R(2) debug digest]
+ after 1000
+ }
+ }
+
+ test "PSYNC2 #3899 regression: verify consistency" {
+ wait_for_condition 50 1000 {
+ ([$R(0) debug digest] eq [$R(1) debug digest]) &&
+ ([$R(1) debug digest] eq [$R(2) debug digest])
+ } else {
+ fail "The three instances have different data sets"
+ }
+ }
+}}}
diff --git a/tests/integration/psync2.tcl b/tests/integration/psync2.tcl
new file mode 100644
index 0000000..4abe059
--- /dev/null
+++ b/tests/integration/psync2.tcl
@@ -0,0 +1,384 @@
+
+proc show_cluster_status {} {
+ uplevel 1 {
+ # The following is the regexp we use to match the log line
+ # time info. Logs are in the following form:
+ #
+ # 11296:M 25 May 2020 17:37:14.652 # Server initialized
+ set log_regexp {^[0-9]+:[A-Z] [0-9]+ [A-Za-z]+ [0-9]+ ([0-9:.]+) .*}
+ set repl_regexp {(master|repl|sync|backlog|meaningful|offset)}
+
+ puts "Master ID is $master_id"
+ for {set j 0} {$j < 5} {incr j} {
+ puts "$j: sync_full: [status $R($j) sync_full]"
+ puts "$j: id1 : [status $R($j) master_replid]:[status $R($j) master_repl_offset]"
+ puts "$j: id2 : [status $R($j) master_replid2]:[status $R($j) second_repl_offset]"
+ puts "$j: backlog : firstbyte=[status $R($j) repl_backlog_first_byte_offset] len=[status $R($j) repl_backlog_histlen]"
+ puts "$j: x var is : [$R($j) GET x]"
+ puts "---"
+ }
+
+ # Show the replication logs of every instance, interleaving
+ # them by the log date.
+ #
+ # First: load the lines as lists for each instance.
+ array set log {}
+ for {set j 0} {$j < 5} {incr j} {
+ set fd [open $R_log($j)]
+ while {[gets $fd l] >= 0} {
+ if {[regexp $log_regexp $l] &&
+ [regexp -nocase $repl_regexp $l]} {
+ lappend log($j) $l
+ }
+ }
+ close $fd
+ }
+
+ # To interleave the lines, at every step consume the element of
+ # the list with the lowest time and remove it. Do it until
+ # all the lists are empty.
+ #
+ # regexp {^[0-9]+:[A-Z] [0-9]+ [A-Za-z]+ [0-9]+ ([0-9:.]+) .*} $l - logdate
+ while 1 {
+ # Find the log with smallest time.
+ set empty 0
+ set best 0
+ set bestdate {}
+ for {set j 0} {$j < 5} {incr j} {
+ if {[llength $log($j)] == 0} {
+ incr empty
+ continue
+ }
+ regexp $log_regexp [lindex $log($j) 0] - date
+ if {$bestdate eq {}} {
+ set best $j
+ set bestdate $date
+ } else {
+ if {[string compare $bestdate $date] > 0} {
+ set best $j
+ set bestdate $date
+ }
+ }
+ }
+ if {$empty == 5} break ; # Our exit condition: no more logs
+
+ # Emit the one with the smallest time (that is the first
+ # event in the time line).
+ puts "\[$best port $R_port($best)\] [lindex $log($best) 0]"
+ set log($best) [lrange $log($best) 1 end]
+ }
+ }
+}
+
+start_server {tags {"psync2 external:skip"}} {
+start_server {} {
+start_server {} {
+start_server {} {
+start_server {} {
+ set master_id 0 ; # Current master
+ set start_time [clock seconds] ; # Test start time
+ set counter_value 0 ; # Current value of the Redis counter "x"
+
+ # Config
+ set debug_msg 0 ; # Enable additional debug messages
+
+ set no_exit 0 ; # Do not exit at end of the test
+
+ set duration 40 ; # Total test seconds
+
+ set genload 1 ; # Load master with writes at every cycle
+
+ set genload_time 5000 ; # Writes duration time in ms
+
+ set disconnect 1 ; # Break replication link between random
+ # master and slave instances while the
+ # master is loaded with writes.
+
+ set disconnect_period 1000 ; # Disconnect repl link every N ms.
+
+ for {set j 0} {$j < 5} {incr j} {
+ set R($j) [srv [expr 0-$j] client]
+ set R_host($j) [srv [expr 0-$j] host]
+ set R_port($j) [srv [expr 0-$j] port]
+ set R_id_from_port($R_port($j)) $j ;# To get a replica index by port
+ set R_log($j) [srv [expr 0-$j] stdout]
+ if {$debug_msg} {puts "Log file: [srv [expr 0-$j] stdout]"}
+ }
+
+ set cycle 0
+ while {([clock seconds]-$start_time) < $duration} {
+ incr cycle
+ test "PSYNC2: --- CYCLE $cycle ---" {}
+
+ # Create a random replication layout.
+ # Start with switching master (this simulates a failover).
+
+ # 1) Select the new master.
+ set master_id [randomInt 5]
+ set used [list $master_id]
+ test "PSYNC2: \[NEW LAYOUT\] Set #$master_id as master" {
+ $R($master_id) slaveof no one
+ $R($master_id) config set repl-ping-replica-period 1 ;# increase the chance that a random ping will cause issues
+ if {$counter_value == 0} {
+ $R($master_id) set x $counter_value
+ }
+ }
+
+ # Build a lookup with the root master of each replica (head of the chain).
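+ # (walk the master_port links until reaching an instance with no master)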
+ array set root_master {}
+ for {set j 0} {$j < 5} {incr j} {
+ set r $j
+ while {1} {
+ set r_master_port [status $R($r) master_port]
+ if {$r_master_port == ""} {
+ set root_master($j) $r
+ break
+ }
+ set r_master_id $R_id_from_port($r_master_port)
+ set r $r_master_id
+ }
+ }
+
+ # Wait for the newly detached master-replica chain (the new master and the
+ # existing replicas that were already connected to it) to get updated on
+ # the new replication id.
+ # This is needed to avoid a race that can result in a full sync: a replica
+ # that already got the updated repl id may try to psync from one that's not
+ # yet aware of it.
+ wait_for_condition 50 1000 {
+ ([status $R(0) master_replid] == [status $R($root_master(0)) master_replid]) &&
+ ([status $R(1) master_replid] == [status $R($root_master(1)) master_replid]) &&
+ ([status $R(2) master_replid] == [status $R($root_master(2)) master_replid]) &&
+ ([status $R(3) master_replid] == [status $R($root_master(3)) master_replid]) &&
+ ([status $R(4) master_replid] == [status $R($root_master(4)) master_replid])
+ } else {
+ show_cluster_status
+ fail "Replica did not inherit the new replid."
+ }
+
+ # Build a lookup with the direct connection master of each replica.
+ # First loop that uses random to decide who replicates from who.
+ array set slave_to_master {}
+ while {[llength $used] != 5} {
+ while 1 {
+ set slave_id [randomInt 5]
+ if {[lsearch -exact $used $slave_id] == -1} break
+ }
+ set rand [randomInt [llength $used]]
+ set mid [lindex $used $rand]
+ set slave_to_master($slave_id) $mid
+ lappend used $slave_id
+ }
+
+ # 2) Attach all the slaves to a random instance
+ # Second loop that does the actual SLAVEOF commands, making sure to execute them in the right order.
+ while {[array size slave_to_master] > 0} {
+ foreach slave_id [array names slave_to_master] {
+ set mid $slave_to_master($slave_id)
+
+ # We only attach the replica to a random instance that is already in the old/new chain.
+ if {$root_master($mid) == $root_master($master_id)} {
+ # My new master is already in the new chain (attached to the new master).
+ } elseif {$root_master($mid) == $root_master($slave_id)} {
+ # My new master and I are in the old chain.
+ } else {
+ # In cycle 1, we do not care about the order.
+ if {$cycle != 1} {
+ # skipping this replica for now to avoid attaching in a bad order
+ # this is done to avoid an unexpected full sync, when we take a
+ # replica that already reconnected to the new chain and got a new replid
+ # and is then set to connect to a master that's still not aware of that new replid
+ continue
+ }
+ }
+
+ set master_host $R_host($mid)
+ set master_port $R_port($mid)
+
+ test "PSYNC2: Set #$slave_id to replicate from #$mid" {
+ $R($slave_id) slaveof $master_host $master_port
+ }
+
+ # Wait for replica to be connected before we proceed.
+ wait_for_condition 50 1000 {
+ [status $R($slave_id) master_link_status] == "up"
+ } else {
+ show_cluster_status
+ fail "Replica not reconnecting."
+ }
+
+ set root_master($slave_id) $root_master($mid)
+ unset slave_to_master($slave_id)
+ break
+ }
+ }
+
+ # Wait for replicas to sync, so the next loop won't get a -LOADING error
+ wait_for_condition 50 1000 {
+ [status $R([expr {($master_id+1)%5}]) master_link_status] == "up" &&
+ [status $R([expr {($master_id+2)%5}]) master_link_status] == "up" &&
+ [status $R([expr {($master_id+3)%5}]) master_link_status] == "up" &&
+ [status $R([expr {($master_id+4)%5}]) master_link_status] == "up"
+ } else {
+ show_cluster_status
+ fail "Replica not reconnecting"
+ }
+
+ # 3) Increment the counter and wait for all the instances
+ # to converge.
+ test "PSYNC2: cluster is consistent after failover" {
+ $R($master_id) incr x; incr counter_value
+ for {set j 0} {$j < 5} {incr j} {
+ wait_for_condition 50 1000 {
+ [$R($j) get x] == $counter_value
+ } else {
+ show_cluster_status
+ fail "Instance #$j x variable is inconsistent"
+ }
+ }
+ }
+
+ # 4) Generate load while breaking the connection of random
+ # slave-master pairs.
+ test "PSYNC2: generate load while killing replication links" {
+ set t [clock milliseconds]
+ set next_break [expr {$t+$disconnect_period}]
+ while {[clock milliseconds]-$t < $genload_time} {
+ if {$genload} {
+ $R($master_id) incr x; incr counter_value
+ }
+ if {[clock milliseconds] >= $next_break} {
+ set next_break \
+ [expr {[clock milliseconds]+$disconnect_period}]
+ set slave_id [randomInt 5]
+ if {$disconnect} {
+ $R($slave_id) client kill type master
+ if {$debug_msg} {
+ puts "+++ Breaking link for replica #$slave_id"
+ }
+ }
+ }
+ }
+ }
+
+ # 5) Read the final counter value and wait for all the instances to converge
+ set x [$R($master_id) get x]
+ test "PSYNC2: cluster is consistent after load (x = $x)" {
+ for {set j 0} {$j < 5} {incr j} {
+ wait_for_condition 50 1000 {
+ [$R($j) get x] == $counter_value
+ } else {
+ show_cluster_status
+ fail "Instance #$j x variable is inconsistent"
+ }
+ }
+ }
+
+ # wait for all the slaves to be in sync.
+ set masteroff [status $R($master_id) master_repl_offset]
+ wait_for_condition 500 100 {
+ [status $R(0) master_repl_offset] >= $masteroff &&
+ [status $R(1) master_repl_offset] >= $masteroff &&
+ [status $R(2) master_repl_offset] >= $masteroff &&
+ [status $R(3) master_repl_offset] >= $masteroff &&
+ [status $R(4) master_repl_offset] >= $masteroff
+ } else {
+ show_cluster_status
+ fail "Replicas offsets didn't catch up with the master after too long time."
+ }
+
+ if {$debug_msg} {
+ show_cluster_status
+ }
+
+ test "PSYNC2: total sum of full synchronizations is exactly 4" {
+ set sum 0
+ for {set j 0} {$j < 5} {incr j} {
+ incr sum [status $R($j) sync_full]
+ }
+ if {$sum != 4} {
+ show_cluster_status
+ assert {$sum == 4}
+ }
+ }
+
+ # In the absence of pings, are the instances really able to have
+ # the exact same offset?
+ $R($master_id) config set repl-ping-replica-period 3600
+ for {set j 0} {$j < 5} {incr j} {
+ if {$j == $master_id} continue
+ $R($j) config set repl-timeout 10000
+ }
+ wait_for_condition 500 100 {
+ [status $R($master_id) master_repl_offset] == [status $R(0) master_repl_offset] &&
+ [status $R($master_id) master_repl_offset] == [status $R(1) master_repl_offset] &&
+ [status $R($master_id) master_repl_offset] == [status $R(2) master_repl_offset] &&
+ [status $R($master_id) master_repl_offset] == [status $R(3) master_repl_offset] &&
+ [status $R($master_id) master_repl_offset] == [status $R(4) master_repl_offset]
+ } else {
+ show_cluster_status
+ fail "Replicas and master offsets were unable to match *exactly*."
+ }
+
+ # Limit anyway the maximum number of cycles. This is useful when the
+ # test is skipped via --only option of the test suite. In that case
+ # we don't want to see many seconds of this test being just skipped.
+ if {$cycle > 50} break
+ }
+
+ test "PSYNC2: Bring the master back again for next test" {
+ $R($master_id) slaveof no one
+ set master_host $R_host($master_id)
+ set master_port $R_port($master_id)
+ for {set j 0} {$j < 5} {incr j} {
+ if {$j == $master_id} continue
+ $R($j) slaveof $master_host $master_port
+ }
+
+ # Wait for replicas to sync. It is not enough to just wait for connected_slaves==4,
+ # since we might do the check before the master realizes that they're disconnected.
+ wait_for_condition 50 1000 {
+ [status $R($master_id) connected_slaves] == 4 &&
+ [status $R([expr {($master_id+1)%5}]) master_link_status] == "up" &&
+ [status $R([expr {($master_id+2)%5}]) master_link_status] == "up" &&
+ [status $R([expr {($master_id+3)%5}]) master_link_status] == "up" &&
+ [status $R([expr {($master_id+4)%5}]) master_link_status] == "up"
+ } else {
+ show_cluster_status
+ fail "Replica not reconnecting"
+ }
+ }
+
+ test "PSYNC2: Partial resync after restart using RDB aux fields" {
+ # Pick a random slave
+ set slave_id [expr {($master_id+1)%5}]
+ set sync_count [status $R($master_id) sync_full]
+ set sync_partial [status $R($master_id) sync_partial_ok]
+ set sync_partial_err [status $R($master_id) sync_partial_err]
+ catch {
+ # Make sure the server saves an RDB on shutdown
+ $R($slave_id) config set save "900 1"
+ $R($slave_id) config rewrite
+ restart_server [expr {0-$slave_id}] true false
+ set R($slave_id) [srv [expr {0-$slave_id}] client]
+ }
+ # note: just waiting for connected_slaves==4 has a race condition, since
+ # we might do the check before the master realizes that the slave disconnected
+ wait_for_condition 50 1000 {
+ [status $R($master_id) sync_partial_ok] == $sync_partial + 1
+ } else {
+ puts "prev sync_full: $sync_count"
+ puts "prev sync_partial_ok: $sync_partial"
+ puts "prev sync_partial_err: $sync_partial_err"
+ puts [$R($master_id) info stats]
+ show_cluster_status
+ fail "Replica didn't partial sync"
+ }
+ set new_sync_count [status $R($master_id) sync_full]
+ assert {$sync_count == $new_sync_count}
+ }
+
+ if {$no_exit} {
+ while 1 { puts -nonewline .; flush stdout; after 1000}
+ }
+
+}}}}}
diff --git a/tests/integration/rdb.tcl b/tests/integration/rdb.tcl
new file mode 100644
index 0000000..cce2167
--- /dev/null
+++ b/tests/integration/rdb.tcl
@@ -0,0 +1,419 @@
+tags {"rdb external:skip"} {
+
+set server_path [tmpdir "server.rdb-encoding-test"]
+
+# Copy RDB with different encodings in server path
+exec cp tests/assets/encodings.rdb $server_path
+exec cp tests/assets/list-quicklist.rdb $server_path
+
+start_server [list overrides [list "dir" $server_path "dbfilename" "list-quicklist.rdb" save ""]] {
+ test "test old version rdb file" {
+ r select 0
+ assert_equal [r get x] 7
+ assert_encoding listpack list
+ r lpop list
+ } {7}
+}
+
+start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] {
+ test "RDB encoding loading test" {
+ r select 0
+ csvdump r
+ } {"0","compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+"0","hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000",
+"0","hash_zipped","hash","a","1","b","2","c","3",
+"0","list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000",
+"0","list_zipped","list","1","2","3","a","b","c","100000","6000000000",
+"0","number","string","10"
+"0","set","set","1","100000","2","3","6000000000","a","b","c",
+"0","set_zipped_1","set","1","2","3","4",
+"0","set_zipped_2","set","100000","200000","300000","400000",
+"0","set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000",
+"0","string","string","Hello World"
+"0","zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000",
+"0","zset_zipped","zset","a","1","b","2","c","3",
+}
+}
+
+set server_path [tmpdir "server.rdb-startup-test"]
+
+start_server [list overrides [list "dir" $server_path] keep_persistence true] {
+ test {Server started empty with non-existing RDB file} {
+ debug_digest
+ } {0000000000000000000000000000000000000000}
+ # Save an RDB file, needed for the next test.
+ r save
+}
+
+start_server [list overrides [list "dir" $server_path] keep_persistence true] {
+ test {Server started empty with empty RDB file} {
+ debug_digest
+ } {0000000000000000000000000000000000000000}
+}
+
+start_server [list overrides [list "dir" $server_path] keep_persistence true] {
+ test {Test RDB stream encoding} {
+ for {set j 0} {$j < 1000} {incr j} {
+ if {rand() < 0.9} {
+ r xadd stream * foo abc
+ } else {
+ r xadd stream * bar $j
+ }
+ }
+ r xgroup create stream mygroup 0
+ set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
+ r xdel stream [lindex [lindex [lindex [lindex $records 0] 1] 1] 0]
+ r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
+ set digest [debug_digest]
+ r config set sanitize-dump-payload no
+ r debug reload
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ }
+ test {Test RDB stream encoding - sanitize dump} {
+ r config set sanitize-dump-payload yes
+ r debug reload
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ }
+ # delete the stream, maybe valgrind will find something
+ r del stream
+}
+
+# Helper function to start a server and kill it, just to check the error
+# logged.
+set defaults {}
+proc start_server_and_kill_it {overrides code} {
+ upvar defaults defaults srv srv server_path server_path
+ set config [concat $defaults $overrides]
+ set srv [start_server [list overrides $config keep_persistence true]]
+ uplevel 1 $code
+ kill_server $srv
+}
+
+# Make the RDB file unreadable
+file attributes [file join $server_path dump.rdb] -permissions 0222
+
+# Detect root account (it is able to read the file even with 0222 perms)
+set isroot 0
+catch {
+ open [file join $server_path dump.rdb]
+ set isroot 1
+}
+
+# Now make sure the server aborted with an error
+if {!$isroot} {
+ start_server_and_kill_it [list "dir" $server_path] {
+ test {Server should not start if RDB file can't be open} {
+ wait_for_condition 50 100 {
+ [string match {*Fatal error loading*} \
+ [exec tail -1 < [dict get $srv stdout]]]
+ } else {
+ fail "Server started even if RDB was unreadable!"
+ }
+ }
+ }
+}
+
+# Fix permissions of the RDB file.
+file attributes [file join $server_path dump.rdb] -permissions 0666
+
+# Corrupt its CRC64 checksum.
+set filesize [file size [file join $server_path dump.rdb]]
+set fd [open [file join $server_path dump.rdb] r+]
+fconfigure $fd -translation binary
+seek $fd -8 end
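+# (an RDB file stores its CRC64 checksum in its last 8 bytes)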
+puts -nonewline $fd "foobar00"; # Corrupt the checksum
+close $fd
+
+# Now make sure the server aborted with an error
+start_server_and_kill_it [list "dir" $server_path] {
+ test {Server should not start if RDB is corrupted} {
+ wait_for_condition 50 100 {
+ [string match {*CRC error*} \
+ [exec tail -10 < [dict get $srv stdout]]]
+ } else {
+ fail "Server started even if RDB was corrupted!"
+ }
+ }
+}
+
+start_server {} {
+ test {Test FLUSHALL aborts bgsave} {
+ r config set save ""
+ # rdb-key-save-delay is in microseconds; 1000us = 1ms sleep per key,
+ # so 5000 keys should take about 5 seconds
+ r config set rdb-key-save-delay 1000
+ populate 5000
+ assert_lessthan 999 [s rdb_changes_since_last_save]
+ r bgsave
+ assert_equal [s rdb_bgsave_in_progress] 1
+ r flushall
+ # wait a second max (bgsave should take 5)
+ wait_for_condition 10 100 {
+ [s rdb_bgsave_in_progress] == 0
+ } else {
+ fail "bgsave not aborted"
+ }
+ # verify that bgsave failed, by checking that the change counter is still high
+ assert_lessthan 999 [s rdb_changes_since_last_save]
+ # make sure the server is still writable
+ r set x xx
+ }
+
+ test {bgsave resets the change counter} {
+ r config set rdb-key-save-delay 0
+ r bgsave
+ wait_for_condition 50 100 {
+ [s rdb_bgsave_in_progress] == 0
+ } else {
+ fail "bgsave not done"
+ }
+ assert_equal [s rdb_changes_since_last_save] 0
+ }
+}
+
+test {client freed during loading} {
+ start_server [list overrides [list key-load-delay 50 loading-process-events-interval-bytes 1024 rdbcompression no save "900 1"]] {
+ # create a big rdb that will take a long time to load. it is important
+ # for keys to be big, since the server processes events only once per
+ # loading-process-events-interval-bytes (lowered to 1kb in the overrides
+ # above). 100mb of rdb with 100k keys will load in more than 5 seconds
+ r debug populate 100000 key 1000
+
+ restart_server 0 false false
+
+ # make sure it's still loading
+ assert_equal [s loading] 1
+
+ # connect and disconnect 5 clients
+ set clients {}
+ for {set j 0} {$j < 5} {incr j} {
+ lappend clients [redis_deferring_client]
+ }
+ foreach rd $clients {
+ $rd debug log bla
+ }
+ foreach rd $clients {
+ $rd read
+ }
+ foreach rd $clients {
+ $rd close
+ }
+
+ # make sure the server freed the clients
+ wait_for_condition 100 100 {
+ [s connected_clients] < 3
+ } else {
+ fail "clients didn't disconnect"
+ }
+
+ # make sure it's still loading
+ assert_equal [s loading] 1
+
+ # no need to keep waiting for loading to complete
+ exec kill [srv 0 pid]
+ }
+}
+
+start_server {} {
+ test {Test RDB load info} {
+ r debug populate 1000
+ r save
+ assert {[r lastsave] <= [lindex [r time] 0]}
+ restart_server 0 true false
+ wait_done_loading r
+ assert {[s rdb_last_load_keys_expired] == 0}
+ assert {[s rdb_last_load_keys_loaded] == 1000}
+
+ r debug set-active-expire 0
+ for {set j 0} {$j < 1024} {incr j} {
+ r select [expr $j%16]
+ r set $j somevalue px 10
+ }
+ after 20
+
+ r save
+ restart_server 0 true false
+ wait_done_loading r
+ assert {[s rdb_last_load_keys_expired] == 1024}
+ assert {[s rdb_last_load_keys_loaded] == 1000}
+ }
+}
+
+# Our COW metrics (Private_Dirty) work only on Linux
+set system_name [string tolower [exec uname -s]]
+set page_size [exec getconf PAGESIZE]
+if {$system_name eq {linux} && $page_size == 4096} {
+
+start_server {overrides {save ""}} {
+ test {Test child sending info} {
+ # make sure that rdb_last_cow_size and current_cow_size are zero (the test
+ # is using a new server), so that the comparisons during the test will be valid
+ assert {[s current_cow_size] == 0}
+ assert {[s current_save_keys_processed] == 0}
+ assert {[s current_save_keys_total] == 0}
+
+ assert {[s rdb_last_cow_size] == 0}
+
+ # using a 200us delay, the bgsave empirically takes about 10 seconds.
+ # we need it to take more than about 5 seconds, since redis only reports COW once a second.
+ r config set rdb-key-save-delay 200
+ r config set loglevel debug
+
+ # populate the db with 10k keys of 512B each (since we want to measure the
+ # COW size by changing some keys and reading the reported COW size, we use
+ # a small key size to prevent the "dismiss mechanism" from freeing memory
+ # and reducing the COW size)
+ set rd [redis_deferring_client 0]
+ set size 500 ;# aim for the 512 bin (sds overhead)
+ set cmd_count 10000
+ for {set k 0} {$k < $cmd_count} {incr k} {
+ $rd set key$k [string repeat A $size]
+ }
+
+ for {set k 0} {$k < $cmd_count} {incr k} {
+ catch { $rd read }
+ }
+
+ $rd close
+
+ # start background rdb save
+ r bgsave
+
+ set current_save_keys_total [s current_save_keys_total]
+ if {$::verbose} {
+ puts "Keys before bgsave start: $current_save_keys_total"
+ }
+
+        # on each iteration, we will write some keys to the server to trigger copy-on-write,
+        # and wait to see that it is reflected in INFO.
+ set iteration 1
+ set key_idx 0
+ while 1 {
+ # take samples before writing new data to the server
+ set cow_size [s current_cow_size]
+ if {$::verbose} {
+ puts "COW info before copy-on-write: $cow_size"
+ }
+
+ set keys_processed [s current_save_keys_processed]
+ if {$::verbose} {
+ puts "current_save_keys_processed info : $keys_processed"
+ }
+
+ # trigger copy-on-write
+ set modified_keys 16
+ for {set k 0} {$k < $modified_keys} {incr k} {
+ r setrange key$key_idx 0 [string repeat B $size]
+ incr key_idx 1
+ }
+
+            # changing 16 keys (512B each) dirties 8KB, i.e. at least 8192 bytes of COW (two
+            # 4096B pages), but we don't want the test to be too strict, so we check for a
+            # change of at least 4096 bytes
+ set exp_cow [expr $cow_size + 4096]
+            # wait to see that the current_cow_size value is updated (as long as the child is in progress)
+ wait_for_condition 80 100 {
+ [s rdb_bgsave_in_progress] == 0 ||
+ [s current_cow_size] >= $exp_cow &&
+ [s current_save_keys_processed] > $keys_processed &&
+ [s current_fork_perc] > 0
+ } else {
+ if {$::verbose} {
+ puts "COW info on fail: [s current_cow_size]"
+ puts [exec tail -n 100 < [srv 0 stdout]]
+ }
+ fail "COW info wasn't reported"
+ }
+
+ # assert that $keys_processed is not greater than total keys.
+ assert_morethan_equal $current_save_keys_total $keys_processed
+
+            # when not running in accurate mode, stop after 2 iterations
+ if {!$::accurate && $iteration == 2} {
+ break
+ }
+
+ # stop iterating if the bgsave completed
+ if { [s rdb_bgsave_in_progress] == 0 } {
+ break
+ }
+
+ incr iteration 1
+ }
+
+        # make sure we saw at least one report of current_cow_size
+ if {$iteration < 2 && $::verbose} {
+ puts [exec tail -n 100 < [srv 0 stdout]]
+ }
+ assert_morethan_equal $iteration 2
+
+        # if the bgsave completed, check that rdb_last_cow_size (the fork exit report)
+        # is at least 90% of the last sampled current_cow_size.
+ if { [s rdb_bgsave_in_progress] == 0 } {
+ set final_cow [s rdb_last_cow_size]
+ set cow_size [expr $cow_size * 0.9]
+ if {$final_cow < $cow_size && $::verbose} {
+ puts [exec tail -n 100 < [srv 0 stdout]]
+ }
+ assert_morethan_equal $final_cow $cow_size
+ }
+ }
+}
+} ;# system_name
+
+exec cp -f tests/assets/scriptbackup.rdb $server_path
+start_server [list overrides [list "dir" $server_path "dbfilename" "scriptbackup.rdb" "appendonly" "no"]] {
+    # the script is: "return redis.call('set', 'foo', 'bar')"
+ # its sha1 is: a0c38691e9fffe4563723c32ba77a34398e090e6
+ test {script won't load anymore if it's in rdb} {
+ assert_equal [r script exists a0c38691e9fffe4563723c32ba77a34398e090e6] 0
+ }
+}
+
+start_server {} {
+ test "failed bgsave prevents writes" {
+ # Make sure the server saves an RDB on shutdown
+ r config set save "900 1"
+
+ r config set rdb-key-save-delay 10000000
+ populate 1000
+ r set x x
+ r bgsave
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+ waitForBgsave r
+
+ # make sure a read command succeeds
+ assert_equal [r get x] x
+
+ # make sure a write command fails
+ assert_error {MISCONF *} {r set x y}
+
+        # repeat with a script
+ assert_error {MISCONF *} {r eval {
+ return redis.call('set','x',1)
+ } 1 x
+ }
+ assert_equal {x} [r eval {
+ return redis.call('get','x')
+ } 1 x
+ ]
+
+ # again with script using shebang
+ assert_error {MISCONF *} {r eval {#!lua
+ return redis.call('set','x',1)
+ } 1 x
+ }
+ assert_equal {x} [r eval {#!lua flags=no-writes
+ return redis.call('get','x')
+ } 1 x
+ ]
+
+ r config set rdb-key-save-delay 0
+ r bgsave
+ waitForBgsave r
+
+ # server is writable again
+ r set x y
+ } {OK}
+}
+
+} ;# tags
diff --git a/tests/integration/redis-benchmark.tcl b/tests/integration/redis-benchmark.tcl
new file mode 100644
index 0000000..8035632
--- /dev/null
+++ b/tests/integration/redis-benchmark.tcl
@@ -0,0 +1,171 @@
+source tests/support/benchmark.tcl
+
+
+proc cmdstat {cmd} {
+ return [cmdrstat $cmd r]
+}
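+# cmdstat returns the commandstats INFO line for a command, which looks roughly like:
+#   cmdstat_get:calls=10,usec=25,usec_per_call=2.50,rejected_calls=0,failed_calls=0
+# the assert_match checks below key off its calls=N field.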
+
+# common code to reset stats, flush the db and run redis-benchmark
+proc common_bench_setup {cmd} {
+ r config resetstat
+ r flushall
+ if {[catch { exec {*}$cmd } error]} {
+ set first_line [lindex [split $error "\n"] 0]
+ puts [colorstr red "redis-benchmark non zero code. first line: $first_line"]
+ fail "redis-benchmark non zero code. first line: $first_line"
+ }
+}
+
+# we run these extra asserts on a simple set,get test for features like URI parsing
+# and other simple flag-related tests
+proc default_set_get_checks {} {
+ assert_match {*calls=10,*} [cmdstat set]
+ assert_match {*calls=10,*} [cmdstat get]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat lrange]
+}
+
+start_server {tags {"benchmark network external:skip logreqres:skip"}} {
+ start_server {} {
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ test {benchmark: set,get} {
+ set cmd [redisbenchmark $master_host $master_port "-c 5 -n 10 -t set,get"]
+ common_bench_setup $cmd
+ default_set_get_checks
+ }
+
+ test {benchmark: connecting using URI set,get} {
+ set cmd [redisbenchmarkuri $master_host $master_port "-c 5 -n 10 -t set,get"]
+ common_bench_setup $cmd
+ default_set_get_checks
+ }
+
+ test {benchmark: connecting using URI with authentication set,get} {
+ r config set masterauth pass
+ set cmd [redisbenchmarkuriuserpass $master_host $master_port "default" pass "-c 5 -n 10 -t set,get"]
+ common_bench_setup $cmd
+ default_set_get_checks
+ }
+
+ test {benchmark: full test suite} {
+ set cmd [redisbenchmark $master_host $master_port "-c 10 -n 100"]
+ common_bench_setup $cmd
+
+ # ping total calls are 2*issued commands per test due to PING_INLINE and PING_MBULK
+ assert_match {*calls=200,*} [cmdstat ping]
+ assert_match {*calls=100,*} [cmdstat set]
+ assert_match {*calls=100,*} [cmdstat get]
+ assert_match {*calls=100,*} [cmdstat incr]
+ # lpush total calls are 2*issued commands per test due to the lrange tests
+ assert_match {*calls=200,*} [cmdstat lpush]
+ assert_match {*calls=100,*} [cmdstat rpush]
+ assert_match {*calls=100,*} [cmdstat lpop]
+ assert_match {*calls=100,*} [cmdstat rpop]
+ assert_match {*calls=100,*} [cmdstat sadd]
+ assert_match {*calls=100,*} [cmdstat hset]
+ assert_match {*calls=100,*} [cmdstat spop]
+ assert_match {*calls=100,*} [cmdstat zadd]
+ assert_match {*calls=100,*} [cmdstat zpopmin]
+ assert_match {*calls=400,*} [cmdstat lrange]
+ assert_match {*calls=100,*} [cmdstat mset]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat rpoplpush]
+ }
+
+ test {benchmark: multi-thread set,get} {
+ set cmd [redisbenchmark $master_host $master_port "--threads 10 -c 5 -n 10 -t set,get"]
+ common_bench_setup $cmd
+ default_set_get_checks
+
+ # ensure only one key was populated
+ assert_match {1} [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+ }
+
+ test {benchmark: pipelined full set,get} {
+ set cmd [redisbenchmark $master_host $master_port "-P 5 -c 10 -n 10010 -t set,get"]
+ common_bench_setup $cmd
+ assert_match {*calls=10010,*} [cmdstat set]
+ assert_match {*calls=10010,*} [cmdstat get]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat lrange]
+
+ # ensure only one key was populated
+ assert_match {1} [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+ }
+
+ test {benchmark: arbitrary command} {
+ set cmd [redisbenchmark $master_host $master_port "-c 5 -n 150 INCRBYFLOAT mykey 10.0"]
+ common_bench_setup $cmd
+ assert_match {*calls=150,*} [cmdstat incrbyfloat]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat get]
+
+ # ensure only one key was populated
+ assert_match {1} [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+ }
+
+ test {benchmark: keyspace length} {
+ set cmd [redisbenchmark $master_host $master_port "-r 50 -t set -n 1000"]
+ common_bench_setup $cmd
+ assert_match {*calls=1000,*} [cmdstat set]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat get]
+
+ # ensure the keyspace has the desired size
+ assert_match {50} [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+ }
+
+        test {benchmark: clients idle mode should return error when maxclients limit is reached} {
+ set cmd [redisbenchmark $master_host $master_port "-c 10 -I"]
+ set original_maxclients [lindex [r config get maxclients] 1]
+ r config set maxclients 5
+ catch { exec {*}$cmd } error
+ assert_match "*Error*" $error
+ r config set maxclients $original_maxclients
+ }
+
+ # tls specific tests
+ if {$::tls} {
+ test {benchmark: specific tls-ciphers} {
+ set cmd [redisbenchmark $master_host $master_port "-r 50 -t set -n 1000 --tls-ciphers \"DEFAULT:-AES128-SHA256\""]
+ common_bench_setup $cmd
+ assert_match {*calls=1000,*} [cmdstat set]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat get]
+ }
+
+ test {benchmark: tls connecting using URI with authentication set,get} {
+ r config set masterauth pass
+ set cmd [redisbenchmarkuriuserpass $master_host $master_port "default" pass "-c 5 -n 10 -t set,get"]
+ common_bench_setup $cmd
+ default_set_get_checks
+ }
+
+ test {benchmark: specific tls-ciphersuites} {
+ r flushall
+ r config resetstat
+ set ciphersuites_supported 1
+ set cmd [redisbenchmark $master_host $master_port "-r 50 -t set -n 1000 --tls-ciphersuites \"TLS_AES_128_GCM_SHA256\""]
+ if {[catch { exec {*}$cmd } error]} {
+ set first_line [lindex [split $error "\n"] 0]
+ if {[string match "*Invalid option*" $first_line]} {
+ set ciphersuites_supported 0
+ if {$::verbose} {
+ puts "Skipping test, TLSv1.3 not supported."
+ }
+ } else {
+ puts [colorstr red "redis-benchmark non zero code. first line: $first_line"]
+ fail "redis-benchmark non zero code. first line: $first_line"
+ }
+ }
+ if {$ciphersuites_supported} {
+ assert_match {*calls=1000,*} [cmdstat set]
+ # assert one of the non benchmarked commands is not present
+ assert_match {} [cmdstat get]
+ }
+ }
+ }
+ }
+}
diff --git a/tests/integration/redis-cli.tcl b/tests/integration/redis-cli.tcl
new file mode 100644
index 0000000..da82dda
--- /dev/null
+++ b/tests/integration/redis-cli.tcl
@@ -0,0 +1,609 @@
+source tests/support/cli.tcl
+
+if {$::singledb} {
+ set ::dbnum 0
+} else {
+ set ::dbnum 9
+}
+
+start_server {tags {"cli"}} {
+ proc open_cli {{opts ""} {infile ""}} {
+ if { $opts == "" } {
+ set opts "-n $::dbnum"
+ }
+ set ::env(TERM) dumb
+ set cmdline [rediscli [srv host] [srv port] $opts]
+ if {$infile ne ""} {
+ set cmdline "$cmdline < $infile"
+ set mode "r"
+ } else {
+ set mode "r+"
+ }
+ set fd [open "|$cmdline" $mode]
+ fconfigure $fd -buffering none
+ fconfigure $fd -blocking false
+ fconfigure $fd -translation binary
+ set _ $fd
+ }
+
+ proc close_cli {fd} {
+ close $fd
+ }
+
+ proc read_cli {fd} {
+ set ret [read $fd]
+ while {[string length $ret] == 0} {
+ after 10
+ set ret [read $fd]
+ }
+
+ # We may have a short read, try to read some more.
+ set empty_reads 0
+ while {$empty_reads < 5} {
+ set buf [read $fd]
+ if {[string length $buf] == 0} {
+ after 10
+ incr empty_reads
+ } else {
+ append ret $buf
+ set empty_reads 0
+ }
+ }
+ return $ret
+ }
+
+ proc write_cli {fd buf} {
+ puts $fd $buf
+ flush $fd
+ }
+
+ # Helpers to run tests in interactive mode
+
+ proc format_output {output} {
+ set _ [string trimright $output "\n"]
+ }
+
+ proc run_command {fd cmd} {
+ write_cli $fd $cmd
+ set _ [format_output [read_cli $fd]]
+ }
+
+ proc test_interactive_cli {name code} {
+ set ::env(FAKETTY) 1
+ set fd [open_cli]
+ test "Interactive CLI: $name" $code
+ close_cli $fd
+ unset ::env(FAKETTY)
+ }
+
+ proc test_interactive_nontty_cli {name code} {
+ set fd [open_cli]
+ test "Interactive non-TTY CLI: $name" $code
+ close_cli $fd
+ }
+
+ # Helpers to run tests where stdout is not a tty
+ proc write_tmpfile {contents} {
+ set tmp [tmpfile "cli"]
+ set tmpfd [open $tmp "w"]
+ puts -nonewline $tmpfd $contents
+ close $tmpfd
+ set _ $tmp
+ }
+
+ proc _run_cli {host port db opts args} {
+ set cmd [rediscli $host $port [list -n $db {*}$args]]
+ foreach {key value} $opts {
+ if {$key eq "pipe"} {
+ set cmd "sh -c \"$value | $cmd\""
+ }
+ if {$key eq "path"} {
+ set cmd "$cmd < $value"
+ }
+ }
+
+ set fd [open "|$cmd" "r"]
+ fconfigure $fd -buffering none
+ fconfigure $fd -translation binary
+ set resp [read $fd 1048576]
+ close $fd
+ set _ [format_output $resp]
+ }
+
+ proc run_cli {args} {
+ _run_cli [srv host] [srv port] $::dbnum {} {*}$args
+ }
+
+ proc run_cli_with_input_pipe {mode cmd args} {
+ if {$mode == "x" } {
+ _run_cli [srv host] [srv port] $::dbnum [list pipe $cmd] -x {*}$args
+ } elseif {$mode == "X"} {
+ _run_cli [srv host] [srv port] $::dbnum [list pipe $cmd] -X tag {*}$args
+ }
+ }
+
+ proc run_cli_with_input_file {mode path args} {
+ if {$mode == "x" } {
+ _run_cli [srv host] [srv port] $::dbnum [list path $path] -x {*}$args
+ } elseif {$mode == "X"} {
+ _run_cli [srv host] [srv port] $::dbnum [list path $path] -X tag {*}$args
+ }
+ }
+
+ proc run_cli_host_port_db {host port db args} {
+ _run_cli $host $port $db {} {*}$args
+ }
+
+ proc test_nontty_cli {name code} {
+ test "Non-interactive non-TTY CLI: $name" $code
+ }
+
+ # Helpers to run tests where stdout is a tty (fake it)
+ proc test_tty_cli {name code} {
+ set ::env(FAKETTY) 1
+ test "Non-interactive TTY CLI: $name" $code
+ unset ::env(FAKETTY)
+ }
+
+ test_interactive_cli "INFO response should be printed raw" {
+ set lines [split [run_command $fd info] "\n"]
+ foreach line $lines {
+ # Info lines end in \r\n, so they now end in \r.
+ if {![regexp {^\r$|^#|^[^#:]+:} $line]} {
+ fail "Malformed info line: $line"
+ }
+ }
+ }
+
+ test_interactive_cli "Status reply" {
+ assert_equal "OK" [run_command $fd "set key foo"]
+ }
+
+ test_interactive_cli "Integer reply" {
+ assert_equal "(integer) 1" [run_command $fd "incr counter"]
+ }
+
+ test_interactive_cli "Bulk reply" {
+ r set key foo
+ assert_equal "\"foo\"" [run_command $fd "get key"]
+ }
+
+ test_interactive_cli "Multi-bulk reply" {
+ r rpush list foo
+ r rpush list bar
+ assert_equal "1) \"foo\"\n2) \"bar\"" [run_command $fd "lrange list 0 -1"]
+ }
+
+ test_interactive_cli "Parsing quotes" {
+ assert_equal "OK" [run_command $fd "set key \"bar\""]
+ assert_equal "bar" [r get key]
+ assert_equal "OK" [run_command $fd "set key \" bar \""]
+ assert_equal " bar " [r get key]
+ assert_equal "OK" [run_command $fd "set key \"\\\"bar\\\"\""]
+ assert_equal "\"bar\"" [r get key]
+ assert_equal "OK" [run_command $fd "set key \"\tbar\t\""]
+ assert_equal "\tbar\t" [r get key]
+
+ # invalid quotation
+ assert_equal "Invalid argument(s)" [run_command $fd "get \"\"key"]
+ assert_equal "Invalid argument(s)" [run_command $fd "get \"key\"x"]
+
+ # quotes after the argument are weird, but should be allowed
+ assert_equal "OK" [run_command $fd "set key\"\" bar"]
+ assert_equal "bar" [r get key]
+ }
+
+ test_interactive_cli "Subscribed mode" {
+ if {$::force_resp3} {
+ run_command $fd "hello 3"
+ }
+
+ set reading "Reading messages... (press Ctrl-C to quit or any key to type command)\r"
+ set erase "\033\[K"; # Erases the "Reading messages..." line.
+
+ # Subscribe to some channels.
+ set sub1 "1) \"subscribe\"\n2) \"ch1\"\n3) (integer) 1\n"
+ set sub2 "1) \"subscribe\"\n2) \"ch2\"\n3) (integer) 2\n"
+ set sub3 "1) \"subscribe\"\n2) \"ch3\"\n3) (integer) 3\n"
+ assert_equal $sub1$sub2$sub3$reading \
+ [run_command $fd "subscribe ch1 ch2 ch3"]
+
+ # Receive pubsub message.
+ r publish ch2 hello
+ set message "1) \"message\"\n2) \"ch2\"\n3) \"hello\"\n"
+ assert_equal $erase$message$reading [read_cli $fd]
+
+ # Unsubscribe some.
+ set unsub1 "1) \"unsubscribe\"\n2) \"ch1\"\n3) (integer) 2\n"
+ set unsub2 "1) \"unsubscribe\"\n2) \"ch2\"\n3) (integer) 1\n"
+ assert_equal $erase$unsub1$unsub2$reading \
+ [run_command $fd "unsubscribe ch1 ch2"]
+
+ run_command $fd "hello 2"
+
+ # Command forbidden in subscribed mode (RESP2).
+ set err "(error) ERR Can't execute 'get': only (P|S)SUBSCRIBE / (P|S)UNSUBSCRIBE / PING / QUIT / RESET are allowed in this context\n"
+ assert_equal $erase$err$reading [run_command $fd "get k"]
+
+ # Command allowed in subscribed mode.
+ set pong "1) \"pong\"\n2) \"\"\n"
+ assert_equal $erase$pong$reading [run_command $fd "ping"]
+
+ # Reset exits subscribed mode.
+ assert_equal ${erase}RESET [run_command $fd "reset"]
+ assert_equal PONG [run_command $fd "ping"]
+
+ # Check TTY output of push messages in RESP3 has ")" prefix (to be changed to ">" in the future).
+ assert_match "1#*" [run_command $fd "hello 3"]
+ set sub1 "1) \"subscribe\"\n2) \"ch1\"\n3) (integer) 1\n"
+ assert_equal $sub1$reading \
+ [run_command $fd "subscribe ch1"]
+ }
+
+ test_interactive_nontty_cli "Subscribed mode" {
+ # Raw output and no "Reading messages..." info message.
+ # Use RESP3 in this test case.
+ assert_match {*proto 3*} [run_command $fd "hello 3"]
+
+ # Subscribe to some channels.
+ set sub1 "subscribe\nch1\n1"
+ set sub2 "subscribe\nch2\n2"
+ assert_equal $sub1\n$sub2 \
+ [run_command $fd "subscribe ch1 ch2"]
+
+ assert_equal OK [run_command $fd "client tracking on"]
+ assert_equal OK [run_command $fd "set k 42"]
+ assert_equal 42 [run_command $fd "get k"]
+
+ # Interleaving invalidate and pubsub messages.
+ r publish ch1 hello
+ r del k
+ r publish ch2 world
+ set message1 "message\nch1\nhello"
+ set invalidate "invalidate\nk"
+ set message2 "message\nch2\nworld"
+ assert_equal $message1\n$invalidate\n$message2\n [read_cli $fd]
+
+ # Unsubscribe all.
+ set unsub1 "unsubscribe\nch1\n1"
+ set unsub2 "unsubscribe\nch2\n0"
+ assert_equal $unsub1\n$unsub2 [run_command $fd "unsubscribe ch1 ch2"]
+ }
+
+ test_tty_cli "Status reply" {
+ assert_equal "OK" [run_cli set key bar]
+ assert_equal "bar" [r get key]
+ }
+
+ test_tty_cli "Integer reply" {
+ r del counter
+ assert_equal "(integer) 1" [run_cli incr counter]
+ }
+
+ test_tty_cli "Bulk reply" {
+ r set key "tab\tnewline\n"
+ assert_equal "\"tab\\tnewline\\n\"" [run_cli get key]
+ }
+
+ test_tty_cli "Multi-bulk reply" {
+ r del list
+ r rpush list foo
+ r rpush list bar
+ assert_equal "1) \"foo\"\n2) \"bar\"" [run_cli lrange list 0 -1]
+ }
+
+ test_tty_cli "Read last argument from pipe" {
+ assert_equal "OK" [run_cli_with_input_pipe x "echo foo" set key]
+ assert_equal "foo\n" [r get key]
+
+ assert_equal "OK" [run_cli_with_input_pipe X "echo foo" set key2 tag]
+ assert_equal "foo\n" [r get key2]
+ }
+
+ test_tty_cli "Read last argument from file" {
+ set tmpfile [write_tmpfile "from file"]
+
+ assert_equal "OK" [run_cli_with_input_file x $tmpfile set key]
+ assert_equal "from file" [r get key]
+
+ assert_equal "OK" [run_cli_with_input_file X $tmpfile set key2 tag]
+ assert_equal "from file" [r get key2]
+
+ file delete $tmpfile
+ }
+
+ test_tty_cli "Escape character in JSON mode" {
+ # reverse solidus
+ r hset solidus \/ \/
+ assert_equal \/ \/ [run_cli hgetall solidus]
+ set escaped_reverse_solidus \"\\"
+ assert_equal $escaped_reverse_solidus $escaped_reverse_solidus [run_cli --json hgetall \/]
+ # non printable (0xF0 in ISO-8859-1, not UTF-8(0xC3 0xB0))
+ set eth "\u00f0\u0065"
+ r hset eth test $eth
+ assert_equal \"\\xf0e\" [run_cli hget eth test]
+ assert_equal \"\u00f0e\" [run_cli --json hget eth test]
+ assert_equal \"\\\\xf0e\" [run_cli --quoted-json hget eth test]
+ # control characters
+ r hset control test "Hello\x00\x01\x02\x03World"
+ assert_equal \"Hello\\u0000\\u0001\\u0002\\u0003World" [run_cli --json hget control test]
+ # non-string keys
+ r hset numkey 1 One
+ assert_equal \{\"1\":\"One\"\} [run_cli --json hgetall numkey]
+ # non-string, non-printable keys
+ r hset npkey "K\u0000\u0001ey" "V\u0000\u0001alue"
+ assert_equal \{\"K\\u0000\\u0001ey\":\"V\\u0000\\u0001alue\"\} [run_cli --json hgetall npkey]
+ assert_equal \{\"K\\\\x00\\\\x01ey\":\"V\\\\x00\\\\x01alue\"\} [run_cli --quoted-json hgetall npkey]
+ }
+
+ test_nontty_cli "Status reply" {
+ assert_equal "OK" [run_cli set key bar]
+ assert_equal "bar" [r get key]
+ }
+
+ test_nontty_cli "Integer reply" {
+ r del counter
+ assert_equal "1" [run_cli incr counter]
+ }
+
+ test_nontty_cli "Bulk reply" {
+ r set key "tab\tnewline\n"
+ assert_equal "tab\tnewline" [run_cli get key]
+ }
+
+ test_nontty_cli "Multi-bulk reply" {
+ r del list
+ r rpush list foo
+ r rpush list bar
+ assert_equal "foo\nbar" [run_cli lrange list 0 -1]
+ }
+
+if {!$::tls} { ;# fake_redis_node doesn't support TLS
+ test_nontty_cli "ASK redirect test" {
+ # Set up two fake Redis nodes.
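+        # Expected flow (standard cluster ASK redirect): the first node answers
+        # -ASK <slot> <host:port>, and redis-cli -c then sends ASKING to the
+        # second node before retrying the command there.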
+ set tclsh [info nameofexecutable]
+ set script "tests/helpers/fake_redis_node.tcl"
+ set port1 [find_available_port $::baseport $::portcount]
+ set port2 [find_available_port $::baseport $::portcount]
+ set p1 [exec $tclsh $script $port1 \
+ "SET foo bar" "-ASK 12182 127.0.0.1:$port2" &]
+ set p2 [exec $tclsh $script $port2 \
+ "ASKING" "+OK" \
+ "SET foo bar" "+OK" &]
+ # Make sure both fake nodes have started listening
+ wait_for_condition 50 50 {
+ [catch {close [socket "127.0.0.1" $port1]}] == 0 && \
+ [catch {close [socket "127.0.0.1" $port2]}] == 0
+ } else {
+ fail "Failed to start fake Redis nodes"
+ }
+ # Run the cli
+ assert_equal "OK" [run_cli_host_port_db "127.0.0.1" $port1 0 -c SET foo bar]
+ }
+}
+
+ test_nontty_cli "Quoted input arguments" {
+ r set "\x00\x00" "value"
+ assert_equal "value" [run_cli --quoted-input get {"\x00\x00"}]
+ }
+
+ test_nontty_cli "No accidental unquoting of input arguments" {
+ run_cli --quoted-input set {"\x41\x41"} quoted-val
+ run_cli set {"\x41\x41"} unquoted-val
+ assert_equal "quoted-val" [r get AA]
+ assert_equal "unquoted-val" [r get {"\x41\x41"}]
+ }
+
+ test_nontty_cli "Invalid quoted input arguments" {
+ catch {run_cli --quoted-input set {"Unterminated}} err
+ assert_match {*exited abnormally*} $err
+
+ # A single arg that unquotes to two arguments is also not expected
+ catch {run_cli --quoted-input set {"arg1" "arg2"}} err
+ assert_match {*exited abnormally*} $err
+ }
+
+ test_nontty_cli "Read last argument from pipe" {
+ assert_equal "OK" [run_cli_with_input_pipe x "echo foo" set key]
+ assert_equal "foo\n" [r get key]
+
+ assert_equal "OK" [run_cli_with_input_pipe X "echo foo" set key2 tag]
+ assert_equal "foo\n" [r get key2]
+ }
+
+ test_nontty_cli "Read last argument from file" {
+ set tmpfile [write_tmpfile "from file"]
+
+ assert_equal "OK" [run_cli_with_input_file x $tmpfile set key]
+ assert_equal "from file" [r get key]
+
+ assert_equal "OK" [run_cli_with_input_file X $tmpfile set key2 tag]
+ assert_equal "from file" [r get key2]
+
+ file delete $tmpfile
+ }
+
+ test_nontty_cli "Test command-line hinting - latest server" {
+ # cli will connect to the running server and will use COMMAND DOCS
+ catch {run_cli --test_hint_file tests/assets/test_cli_hint_suite.txt} output
+ assert_match "*SUCCESS*" $output
+ }
+
+ test_nontty_cli "Test command-line hinting - no server" {
+ # cli will fail to connect to the server and will use the cached commands.c
+ catch {run_cli -p 123 --test_hint_file tests/assets/test_cli_hint_suite.txt} output
+ assert_match "*SUCCESS*" $output
+ }
+
+ test_nontty_cli "Test command-line hinting - old server" {
+        # cli will connect to the server but will not use COMMAND DOCS,
+        # and will complete the missing info from the cached commands.c
+ r ACL setuser clitest on nopass +@all -command|docs
+ catch {run_cli --user clitest -a nopass --no-auth-warning --test_hint_file tests/assets/test_cli_hint_suite.txt} output
+ assert_match "*SUCCESS*" $output
+ r acl deluser clitest
+ }
+
+ proc test_redis_cli_rdb_dump {functions_only} {
+ r flushdb
+ r function flush
+
+ set dir [lindex [r config get dir] 1]
+
+ assert_equal "OK" [r debug populate 100000 key 1000]
+ assert_equal "lib1" [r function load "#!lua name=lib1\nredis.register_function('func1', function() return 123 end)"]
+ if {$functions_only} {
+ set args "--functions-rdb $dir/cli.rdb"
+ } else {
+ set args "--rdb $dir/cli.rdb"
+ }
+ catch {run_cli {*}$args} output
+ assert_match {*Transfer finished with success*} $output
+
+ file delete "$dir/dump.rdb"
+ file rename "$dir/cli.rdb" "$dir/dump.rdb"
+
+ assert_equal "OK" [r set should-not-exist 1]
+ assert_equal "should_not_exist_func" [r function load "#!lua name=should_not_exist_func\nredis.register_function('should_not_exist_func', function() return 456 end)"]
+ assert_equal "OK" [r debug reload nosave]
+ assert_equal {} [r get should-not-exist]
+ assert_equal {{library_name lib1 engine LUA functions {{name func1 description {} flags {}}}}} [r function list]
+ if {$functions_only} {
+ assert_equal 0 [r dbsize]
+ } else {
+ assert_equal 100000 [r dbsize]
+ }
+ }
+
+ foreach {functions_only} {no yes} {
+
+ test "Dumping an RDB - functions only: $functions_only" {
+ # Disk-based master
+ assert_match "OK" [r config set repl-diskless-sync no]
+ test_redis_cli_rdb_dump $functions_only
+
+ # Disk-less master
+ assert_match "OK" [r config set repl-diskless-sync yes]
+ assert_match "OK" [r config set repl-diskless-sync-delay 0]
+ test_redis_cli_rdb_dump $functions_only
+ } {} {needs:repl needs:debug}
+
+ } ;# foreach functions_only
+
+ test "Scan mode" {
+ r flushdb
+ populate 1000 key: 1
+
+ # basic use
+ assert_equal 1000 [llength [split [run_cli --scan]]]
+
+ # pattern
+ assert_equal {key:2} [run_cli --scan --pattern "*:2"]
+
+ # pattern matching with a quoted string
+ assert_equal {key:2} [run_cli --scan --quoted-pattern {"*:\x32"}]
+ }
+
+ proc test_redis_cli_repl {} {
+ set fd [open_cli "--replica"]
+ wait_for_condition 500 100 {
+ [string match {*slave0:*state=online*} [r info]]
+ } else {
+ fail "redis-cli --replica did not connect"
+ }
+
+ for {set i 0} {$i < 100} {incr i} {
+ r set test-key test-value-$i
+ }
+
+ wait_for_condition 500 100 {
+ [string match {*test-value-99*} [read_cli $fd]]
+ } else {
+ fail "redis-cli --replica didn't read commands"
+ }
+
+ fconfigure $fd -blocking true
+ r client kill type slave
+ catch { close_cli $fd } err
+ assert_match {*Server closed the connection*} $err
+ }
+
+ test "Connecting as a replica" {
+ # Disk-based master
+ assert_match "OK" [r config set repl-diskless-sync no]
+ test_redis_cli_repl
+
+ # Disk-less master
+ assert_match "OK" [r config set repl-diskless-sync yes]
+ assert_match "OK" [r config set repl-diskless-sync-delay 0]
+ test_redis_cli_repl
+ } {} {needs:repl}
+
+ test "Piping raw protocol" {
+ set cmds [tmpfile "cli_cmds"]
+ set cmds_fd [open $cmds "w"]
+
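+        # formatCommand (a test helper) is assumed to encode each command in RESP,
+        # e.g. "del test-counter" becomes *2\r\n$3\r\ndel\r\n$12\r\ntest-counter\r\n,
+        # which is the raw form --pipe expects. The expected reply count below is
+        # 1 DEL + 1000 INCR + 1000 SET + 100 large SETs = 2101 (plus one SELECT
+        # when not running in singledb mode).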
+ set cmds_count 2101
+
+ if {!$::singledb} {
+ puts $cmds_fd [formatCommand select 9]
+ incr cmds_count
+ }
+ puts $cmds_fd [formatCommand del test-counter]
+
+ for {set i 0} {$i < 1000} {incr i} {
+ puts $cmds_fd [formatCommand incr test-counter]
+ puts $cmds_fd [formatCommand set large-key [string repeat "x" 20000]]
+ }
+
+ for {set i 0} {$i < 100} {incr i} {
+ puts $cmds_fd [formatCommand set very-large-key [string repeat "x" 512000]]
+ }
+ close $cmds_fd
+
+ set cli_fd [open_cli "--pipe" $cmds]
+ fconfigure $cli_fd -blocking true
+ set output [read_cli $cli_fd]
+
+ assert_equal {1000} [r get test-counter]
+ assert_match "*All data transferred*errors: 0*replies: ${cmds_count}*" $output
+
+ file delete $cmds
+ }
+
+ test "Options -X with illegal argument" {
+ assert_error "*-x and -X are mutually exclusive*" {run_cli -x -X tag}
+
+ assert_error "*Unrecognized option or bad number*" {run_cli -X}
+
+ assert_error "*tag not match*" {run_cli_with_input_pipe X "echo foo" set key wrong_tag}
+ }
+
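+    # In the two tests below, -x makes redis-cli read its last argument from stdin,
+    # while -X <tag> reads stdin into the argument whose placeholder matches <tag>;
+    # piping DUMP output into RESTORE round-trips the key through its serialized form.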
+ test "DUMP RESTORE with -x option" {
+ set cmdline [rediscli [srv host] [srv port]]
+
+ exec {*}$cmdline DEL set new_set
+ exec {*}$cmdline SADD set 1 2 3 4 5 6
+ assert_equal 6 [exec {*}$cmdline SCARD set]
+
+ assert_equal "OK" [exec {*}$cmdline -D "" --raw DUMP set | \
+ {*}$cmdline -x RESTORE new_set 0]
+
+ assert_equal 6 [exec {*}$cmdline SCARD new_set]
+ assert_equal "1\n2\n3\n4\n5\n6" [exec {*}$cmdline SMEMBERS new_set]
+ }
+
+ test "DUMP RESTORE with -X option" {
+ set cmdline [rediscli [srv host] [srv port]]
+
+ exec {*}$cmdline DEL zset new_zset
+ exec {*}$cmdline ZADD zset 1 a 2 b 3 c
+ assert_equal 3 [exec {*}$cmdline ZCARD zset]
+
+ assert_equal "OK" [exec {*}$cmdline -D "" --raw DUMP zset | \
+ {*}$cmdline -X dump_tag RESTORE new_zset 0 dump_tag REPLACE]
+
+ assert_equal 3 [exec {*}$cmdline ZCARD new_zset]
+ assert_equal "a\n1\nb\n2\nc\n3" [exec {*}$cmdline ZRANGE new_zset 0 -1 WITHSCORES]
+ }
+}
diff --git a/tests/integration/replication-2.tcl b/tests/integration/replication-2.tcl
new file mode 100644
index 0000000..c18ff24
--- /dev/null
+++ b/tests/integration/replication-2.tcl
@@ -0,0 +1,93 @@
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ test {First server should have role slave after SLAVEOF} {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ wait_replica_online r
+ wait_for_condition 50 100 {
+ [s -1 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
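+        # Background for the min-slaves tests below: with min-slaves-to-write N and
+        # min-slaves-max-lag S, the master rejects write commands with -NOREPLICAS
+        # unless at least N replicas with lag <= S seconds are connected.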
+ test {If min-slaves-to-write is honored, write is accepted} {
+ r config set min-slaves-to-write 1
+ r config set min-slaves-max-lag 10
+ r set foo 12345
+ wait_for_condition 50 100 {
+ [r -1 get foo] eq {12345}
+ } else {
+                fail "Write did not reach replica"
+ }
+ }
+
+ test {No write if min-slaves-to-write is < attached slaves} {
+ r config set min-slaves-to-write 2
+ r config set min-slaves-max-lag 10
+ catch {r set foo 12345} err
+ set err
+ } {NOREPLICAS*}
+
+ test {If min-slaves-to-write is honored, write is accepted (again)} {
+ r config set min-slaves-to-write 1
+ r config set min-slaves-max-lag 10
+ r set foo 12345
+ wait_for_condition 50 100 {
+ [r -1 get foo] eq {12345}
+ } else {
+                fail "Write did not reach replica"
+ }
+ }
+
+        test {No write if the slave lag is > min-slaves-max-lag} {
+ r config set min-slaves-to-write 1
+ r config set min-slaves-max-lag 2
+ pause_process [srv -1 pid]
+ assert {[r set foo 12345] eq {OK}}
+ wait_for_condition 100 100 {
+ [catch {r set foo 12345}] != 0
+ } else {
+ fail "Master didn't become readonly"
+ }
+ catch {r set foo 12345} err
+ assert_match {NOREPLICAS*} $err
+ }
+ resume_process [srv -1 pid]
+
+ test {min-slaves-to-write is ignored by slaves} {
+ r config set min-slaves-to-write 1
+ r config set min-slaves-max-lag 10
+ r -1 config set min-slaves-to-write 1
+ r -1 config set min-slaves-max-lag 10
+ r set foo aaabbb
+ wait_for_condition 50 100 {
+ [r -1 get foo] eq {aaabbb}
+ } else {
+                fail "Write did not reach replica"
+ }
+ }
+
+ # Fix parameters for the next test to work
+ r config set min-slaves-to-write 0
+ r -1 config set min-slaves-to-write 0
+ r flushall
+
+ test {MASTER and SLAVE dataset should be identical after complex ops} {
+ createComplexDataset r 10000
+ after 500
+ if {[r debug digest] ne [r -1 debug digest]} {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ puts "Master - Replica inconsistency"
+ puts "Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ assert_equal [r debug digest] [r -1 debug digest]
+ }
+ }
+}
diff --git a/tests/integration/replication-3.tcl b/tests/integration/replication-3.tcl
new file mode 100644
index 0000000..f53a05a
--- /dev/null
+++ b/tests/integration/replication-3.tcl
@@ -0,0 +1,130 @@
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ test {First server should have role slave after SLAVEOF} {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ wait_for_condition 50 100 {
+ [s -1 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ if {$::accurate} {set numops 50000} else {set numops 5000}
+
+ test {MASTER and SLAVE consistency with expire} {
+ createComplexDataset r $numops useexpire
+
+ # Make sure everything expired before taking the digest
+ # createComplexDataset uses max expire time of 2 seconds
+ wait_for_condition 50 100 {
+ 0 == [scan [regexp -inline {expires\=([\d]*)} [r -1 info keyspace]] expires=%d]
+ } else {
+ fail "expire didn't end"
+ }
+
+ # make sure the replica got all the DELs
+ wait_for_ofs_sync [srv 0 client] [srv -1 client]
+
+ if {[r debug digest] ne [r -1 debug digest]} {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ puts "Master - Replica inconsistency"
+ puts "Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ assert_equal [r debug digest] [r -1 debug digest]
+ }
+
+ test {Master can replicate command longer than client-query-buffer-limit on replica} {
+ # Configure the master to have a bigger query buffer limit
+ r config set client-query-buffer-limit 2000000
+ r -1 config set client-query-buffer-limit 1048576
+ # Write a very large command onto the master
+ r set key [string repeat "x" 1100000]
+ wait_for_condition 300 100 {
+ [r -1 get key] eq [string repeat "x" 1100000]
+ } else {
+ fail "Unable to replicate command longer than client-query-buffer-limit"
+ }
+ }
+
+ test {Slave is able to evict keys created in writable slaves} {
+ r -1 select 5
+ assert {[r -1 dbsize] == 0}
+ r -1 config set slave-read-only no
+ r -1 set key1 1 ex 5
+ r -1 set key2 2 ex 5
+ r -1 set key3 3 ex 5
+ assert {[r -1 dbsize] == 3}
+ after 6000
+ r -1 dbsize
+ } {0}
+
+ test {Writable replica doesn't return expired keys} {
+ r select 5
+ assert {[r dbsize] == 0}
+ r debug set-active-expire 0
+ r set key1 5 px 10
+ r set key2 5 px 10
+ r -1 select 5
+ wait_for_condition 50 100 {
+ [r -1 dbsize] == 2 && [r -1 exists key1 key2] == 0
+ } else {
+ fail "Keys didn't replicate or didn't expire."
+ }
+ r -1 config set slave-read-only no
+ assert_equal 2 [r -1 dbsize] ; # active expire is off
+ assert_equal 1 [r -1 incr key1] ; # incr expires and re-creates key1
+ assert_equal -1 [r -1 ttl key1] ; # incr created key1 without TTL
+ assert_equal {} [r -1 get key2] ; # key2 expired but not deleted
+ assert_equal 2 [r -1 dbsize]
+ # cleanup
+ r debug set-active-expire 1
+ r -1 del key1 key2
+ r -1 config set slave-read-only yes
+ r del key1 key2
+ }
+
+ test {PFCOUNT updates cache on readonly replica} {
+ r select 5
+ assert {[r dbsize] == 0}
+ r pfadd key a b c d e f g h i j k l m n o p q
+ set strval [r get key]
+ r -1 select 5
+ wait_for_condition 50 100 {
+ [r -1 dbsize] == 1
+ } else {
+ fail "Replication timeout."
+ }
+ assert {$strval == [r -1 get key]}
+ assert_equal 17 [r -1 pfcount key]
+ assert {$strval != [r -1 get key]}; # cache updated
+ # cleanup
+ r del key
+ }
+
+ test {PFCOUNT doesn't use expired key on readonly replica} {
+ r select 5
+ assert {[r dbsize] == 0}
+ r debug set-active-expire 0
+ r pfadd key a b c d e f g h i j k l m n o p q
+ r pexpire key 10
+ r -1 select 5
+ wait_for_condition 50 100 {
+ [r -1 dbsize] == 1 && [r -1 exists key] == 0
+ } else {
+ fail "Key didn't replicate or didn't expire."
+ }
+ assert_equal [r -1 pfcount key] 0 ; # expired key not used
+ assert_equal [r -1 dbsize] 1 ; # but it's also not deleted
+ # cleanup
+ r debug set-active-expire 1
+ r del key
+ }
+ }
+}
diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl
new file mode 100644
index 0000000..4370080
--- /dev/null
+++ b/tests/integration/replication-4.tcl
@@ -0,0 +1,295 @@
+start_server {tags {"repl network external:skip singledb:skip"} overrides {save {}}} {
+ start_server { overrides {save {}}} {
+
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000]
+ set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000]
+ set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000]
+
+ test {First server should have role slave after SLAVEOF} {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 role] eq {slave}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ test {Test replication with parallel clients writing in different DBs} {
+ # Gives the random workloads a chance to add some complex commands.
+ after 5000
+
+ # Make sure all parallel clients have written data.
+ wait_for_condition 1000 50 {
+ [$master select 9] == {OK} && [$master dbsize] > 0 &&
+ [$master select 11] == {OK} && [$master dbsize] > 0 &&
+ [$master select 12] == {OK} && [$master dbsize] > 0
+ } else {
+ fail "Parallel clients are not writing in different DBs."
+ }
+
+ stop_bg_complex_data $load_handle0
+ stop_bg_complex_data $load_handle1
+ stop_bg_complex_data $load_handle2
+ wait_for_condition 100 100 {
+ [$master debug digest] == [$slave debug digest]
+ } else {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ fail "Master - Replica inconsistency, Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ # Load some functions to be used later
+ $master FUNCTION load replace {#!lua name=test
+ redis.register_function{function_name='f_default_flags', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={}}
+ redis.register_function{function_name='f_no_writes', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={'no-writes'}}
+ }
+
+ test {First server should have role slave after SLAVEOF} {
+ $slave slaveof $master_host $master_port
+ wait_replica_online $master
+ }
+
+ test {With min-slaves-to-write (1,3): master should be writable} {
+ $master config set min-slaves-max-lag 3
+ $master config set min-slaves-to-write 1
+ assert_equal OK [$master set foo 123]
+ assert_equal OK [$master eval "return redis.call('set','foo',12345)" 0]
+ }
+
+ test {With min-slaves-to-write (2,3): master should not be writable} {
+ $master config set min-slaves-max-lag 3
+ $master config set min-slaves-to-write 2
+ assert_error "*NOREPLICAS*" {$master set foo bar}
+ assert_error "*NOREPLICAS*" {$master eval "redis.call('set','foo','bar')" 0}
+ }
+
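+        # Functions declared without the no-writes flag are treated as potential
+        # writers, so the NOREPLICAS condition rejects them; a no-writes function
+        # is read-only and is still allowed to run.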
+ test {With min-slaves-to-write function without no-write flag} {
+ assert_error "*NOREPLICAS*" {$master fcall f_default_flags 1 foo}
+ assert_equal "12345" [$master fcall f_no_writes 1 foo]
+ }
+
+ test {With not enough good slaves, read in Lua script is still accepted} {
+ $master config set min-slaves-max-lag 3
+ $master config set min-slaves-to-write 1
+ $master eval "redis.call('set','foo','bar')" 0
+
+ $master config set min-slaves-to-write 2
+ $master eval "return redis.call('get','foo')" 0
+ } {bar}
+
+ test {With min-slaves-to-write: master not writable with lagged slave} {
+ $master config set min-slaves-max-lag 2
+ $master config set min-slaves-to-write 1
+ assert_equal OK [$master set foo 123]
+ assert_equal OK [$master eval "return redis.call('set','foo',12345)" 0]
+ # Killing a slave to make it become a lagged slave.
+ pause_process [srv 0 pid]
+ # Waiting for slave kill.
+ wait_for_condition 100 100 {
+ [catch {$master set foo 123}] != 0
+ } else {
+ fail "Master didn't become readonly"
+ }
+ assert_error "*NOREPLICAS*" {$master set foo 123}
+ assert_error "*NOREPLICAS*" {$master eval "return redis.call('set','foo',12345)" 0}
+ resume_process [srv 0 pid]
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ test {First server should have role slave after SLAVEOF} {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ test {Replication of an expired key does not delete the expired key} {
+        # This test is very likely to produce a false positive if the wait_for_ofs_sync
+ # takes longer than the expiration time, so give it a few more chances.
+ # Go with 5 retries of increasing timeout, i.e. start with 500ms, then go
+ # to 1000ms, 2000ms, 4000ms, 8000ms.
+ set px_ms 500
+ for {set i 0} {$i < 5} {incr i} {
+
+ wait_for_ofs_sync $master $slave
+ $master debug set-active-expire 0
+ $master set k 1 px $px_ms
+ wait_for_ofs_sync $master $slave
+ pause_process [srv 0 pid]
+ $master incr k
+ after [expr $px_ms + 1]
+            # Pausing the replica makes sure the INCR arrives at the replica
+            # only after the key has logically expired.
+ resume_process [srv 0 pid]
+ wait_for_ofs_sync $master $slave
+ # Check that k is logically expired but is present in the replica.
+ set res [$slave exists k]
+ set errcode [catch {$slave debug object k} err] ; # Raises exception if k is gone.
+ if {$res == 0 && $errcode == 0} { break }
+ set px_ms [expr $px_ms * 2]
+
+ } ;# for
+
+ if {$::verbose} { puts "Replication of an expired key does not delete the expired key test attempts: $i" }
+ assert_equal $res 0
+ assert_equal $errcode 0
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ test {First server should have role slave after SLAVEOF} {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 role] eq {slave}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ test {Replication: commands with many arguments (issue #1221)} {
+            # We now issue large MSET commands that may trigger a specific
+            # class of bugs, see issue #1221.
+ for {set j 0} {$j < 100} {incr j} {
+ set cmd [list mset]
+ for {set x 0} {$x < 1000} {incr x} {
+ lappend cmd [randomKey] [randomValue]
+ }
+ $master {*}$cmd
+ }
+
+ set retry 10
+        while {$retry && ([$master debug digest] ne [$slave debug digest])} {
+ after 1000
+ incr retry -1
+ }
+ assert {[$master dbsize] > 0}
+ }
+
+        test {spop with count is replicated as srem commands} {
+ $master del myset
+
+ set content {}
+ for {set j 0} {$j < 4000} {} {
+ lappend content [incr j]
+ }
+ $master sadd myset {*}$content
+ $master spop myset 1023
+ $master spop myset 1024
+ $master spop myset 1025
+
+ assert_match 928 [$master scard myset]
+ assert_match {*calls=3,*} [cmdrstat spop $master]
+
+ wait_for_condition 50 100 {
+ [status $slave master_repl_offset] == [status $master master_repl_offset]
+ } else {
+ fail "SREM replication inconsistency."
+ }
+ assert_match {*calls=4,*} [cmdrstat srem $slave]
+ assert_match 928 [$slave scard myset]
+ }
+
+ test {Replication of SPOP command -- alsoPropagate() API} {
+ $master del myset
+ set size [expr 1+[randomInt 100]]
+ set content {}
+ for {set j 0} {$j < $size} {incr j} {
+ lappend content [randomValue]
+ }
+ $master sadd myset {*}$content
+
+ set count [randomInt 100]
+ set result [$master spop myset $count]
+
+ wait_for_condition 50 100 {
+ [$master debug digest] eq [$slave debug digest]
+ } else {
+ fail "SPOP replication inconsistency"
+ }
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set replica [srv 0 client]
+
+ test {First server should have role slave after SLAVEOF} {
+ $replica slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 role] eq {slave}
+ } else {
+ fail "Replication not started."
+ }
+ wait_for_sync $replica
+ }
+
+ test {Data divergence can happen under default conditions} {
+ $replica config set propagation-error-behavior ignore
+ $master debug replicate fake-command-1
+
+ # Wait for replication to normalize
+ $master set foo bar2
+ $master wait 1 2000
+
+ # Make sure we triggered the error, by finding the critical
+ # message and the fake command.
+ assert_equal [count_log_message 0 "fake-command-1"] 1
+ assert_equal [count_log_message 0 "== CRITICAL =="] 1
+ }
+
+ test {Data divergence is allowed on writable replicas} {
+ $replica config set replica-read-only no
+ $replica set number2 foo
+ $master incrby number2 1
+ $master wait 1 2000
+
+ assert_equal [$master get number2] 1
+ assert_equal [$replica get number2] foo
+
+ assert_equal [count_log_message 0 "incrby"] 1
+ }
+ }
+}
diff --git a/tests/integration/replication-buffer.tcl b/tests/integration/replication-buffer.tcl
new file mode 100644
index 0000000..64b26ca
--- /dev/null
+++ b/tests/integration/replication-buffer.tcl
@@ -0,0 +1,307 @@
+# This test group aims to verify that all replicas share one global replication buffer:
+# two replicas don't double the replication buffer size, and when no replica is using
+# it, the replication buffer shrinks.
+start_server {tags {"repl external:skip"}} {
+start_server {} {
+start_server {} {
+start_server {} {
+ set replica1 [srv -3 client]
+ set replica2 [srv -2 client]
+ set replica3 [srv -1 client]
+
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ $master config set save ""
+ $master config set repl-backlog-size 16384
+ $master config set repl-diskless-sync-delay 5
+ $master config set repl-diskless-sync-max-replicas 1
+ $master config set client-output-buffer-limit "replica 0 0 0"
+
+ # Make sure replica3 is synchronized with master
+ $replica3 replicaof $master_host $master_port
+ wait_for_sync $replica3
+
+ # Generating RDB will take some 100 seconds
+ $master config set rdb-key-save-delay 1000000
+ populate 100 "" 16
+
+    # Make sure replica1 and replica2 are waiting for the bgsave
+ $master config set repl-diskless-sync-max-replicas 2
+ $replica1 replicaof $master_host $master_port
+ $replica2 replicaof $master_host $master_port
+ wait_for_condition 50 100 {
+ ([s rdb_bgsave_in_progress] == 1) &&
+ [lindex [$replica1 role] 3] eq {sync} &&
+ [lindex [$replica2 role] 3] eq {sync}
+ } else {
+ fail "fail to sync with replicas"
+ }
+
+ test {All replicas share one global replication buffer} {
+ set before_used [s used_memory]
+ populate 1024 "" 1024 ; # Write extra 1M data
+        # New data uses 1M of memory, but all replicas share a single
+        # replication buffer, so the replicas' total output memory is
+        # no more than double the replication buffer size.
+ set repl_buf_mem [s mem_total_replication_buffers]
+ set extra_mem [expr {[s used_memory]-$before_used-1024*1024}]
+ assert {$extra_mem < 2*$repl_buf_mem}
+
+        # Kill replica1, the replication buffer will not become smaller
+ catch {$replica1 shutdown nosave}
+ wait_for_condition 50 100 {
+ [s connected_slaves] eq {2}
+ } else {
+            fail "replica didn't disconnect from master"
+ }
+ assert_equal $repl_buf_mem [s mem_total_replication_buffers]
+ }
+
+    test {Replication buffer will become smaller when no replica uses it} {
+ # Make sure replica3 catch up with the master
+ wait_for_ofs_sync $master $replica3
+
+ set repl_buf_mem [s mem_total_replication_buffers]
+        # Kill replica2, the replication buffer will become smaller
+ catch {$replica2 shutdown nosave}
+ wait_for_condition 50 100 {
+ [s connected_slaves] eq {1}
+ } else {
+            fail "replica2 didn't disconnect from master"
+ }
+ assert {[expr $repl_buf_mem - 1024*1024] > [s mem_total_replication_buffers]}
+ }
+}
+}
+}
+}
+
+# This test group aims to verify that the replication backlog size can outgrow the
+# backlog limit config if there is a slow replica which keeps a massive replication
+# buffer, and that replicas can use this replication buffer (beyond the backlog
+# config) for partial re-synchronization. Of course, replication backlog memory can
+# also become smaller again when the master disconnects slow replicas because their
+# output buffer limit is reached.
+start_server {tags {"repl external:skip"}} {
+start_server {} {
+start_server {} {
+ set replica1 [srv -2 client]
+ set replica1_pid [s -2 process_id]
+ set replica2 [srv -1 client]
+ set replica2_pid [s -1 process_id]
+
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ $master config set save ""
+ $master config set repl-backlog-size 16384
+ $master config set client-output-buffer-limit "replica 0 0 0"
+
+    # Executing 'debug digest' on a master which has many keys takes a long time
+    # (especially in valgrind), and could cause replica1 and replica2 to disconnect
+    # from the master.
+ $master config set repl-timeout 1000
+ $replica1 config set repl-timeout 1000
+ $replica2 config set repl-timeout 1000
+
+ $replica1 replicaof $master_host $master_port
+ wait_for_sync $replica1
+
+ test {Replication backlog size can outgrow the backlog limit config} {
+ # Generating RDB will take 1000 seconds
+ $master config set rdb-key-save-delay 1000000
+ populate 1000 master 10000
+ $replica2 replicaof $master_host $master_port
+        # Make sure replica2 is waiting for the bgsave
+ wait_for_condition 5000 100 {
+ ([s rdb_bgsave_in_progress] == 1) &&
+ [lindex [$replica2 role] 3] eq {sync}
+ } else {
+ fail "fail to sync with replicas"
+ }
+        # The actual replication backlog grows beyond the backlog setting since
+        # the slow replica2 keeps a reference to the replication buffer.
+ populate 10000 master 10000
+ assert {[s repl_backlog_histlen] > [expr 10000*10000]}
+ }
+
+    # Wait for replica1 to catch up with the master
+ wait_for_condition 1000 100 {
+ [s -2 master_repl_offset] eq [s master_repl_offset]
+ } else {
+ fail "Replica offset didn't catch up with the master after too long time"
+ }
+
+ test {Replica could use replication buffer (beyond backlog config) for partial resynchronization} {
+        # replica1 disconnects from the master
+ $replica1 replicaof [srv -1 host] [srv -1 port]
+ # Write a mass of data that exceeds repl-backlog-size
+ populate 10000 master 10000
+ # replica1 reconnects with master
+ $replica1 replicaof $master_host $master_port
+ wait_for_condition 1000 100 {
+ [s -2 master_repl_offset] eq [s master_repl_offset]
+ } else {
+ fail "Replica offset didn't catch up with the master after too long time"
+ }
+
+        # replica2 is still waiting for the bgsave to finish
+ assert {[s rdb_bgsave_in_progress] eq {1} && [lindex [$replica2 role] 3] eq {sync}}
+ # master accepted replica1 partial resync
+ assert_equal [s sync_partial_ok] {1}
+ assert_equal [$master debug digest] [$replica1 debug digest]
+ }
+
+    test {Replication backlog memory will become smaller when disconnecting replicas} {
+ assert {[s repl_backlog_histlen] > [expr 2*10000*10000]}
+ assert_equal [s connected_slaves] {2}
+
+ pause_process $replica2_pid
+ r config set client-output-buffer-limit "replica 128k 0 0"
+ # trigger output buffer limit check
+ r set key [string repeat A [expr 64*1024]]
+        # master will close replica2's connection since replica2's output
+        # buffer limit is reached, so only replica1 remains.
+ wait_for_condition 100 100 {
+ [s connected_slaves] eq {1}
+ } else {
+ fail "master didn't disconnect with replica2"
+ }
+
+        # Since we trim the replication backlog incrementally, replication backlog
+        # memory may take time to be reclaimed.
+ wait_for_condition 1000 100 {
+ [s repl_backlog_histlen] < [expr 10000*10000]
+ } else {
+ fail "Replication backlog memory is not smaller"
+ }
+ resume_process $replica2_pid
+ }
+ # speed up termination
+ $master config set shutdown-timeout 0
+}
+}
+}
+
+test {Partial resynchronization is successful even when client-output-buffer-limit is less than repl-backlog-size} {
+ start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ r config set save ""
+ r config set repl-backlog-size 100mb
+ r config set client-output-buffer-limit "replica 512k 0 0"
+
+ set replica [srv -1 client]
+ $replica replicaof [srv 0 host] [srv 0 port]
+ wait_for_sync $replica
+
+ set big_str [string repeat A [expr 10*1024*1024]] ;# 10mb big string
+ r multi
+ r client kill type replica
+ r set key $big_str
+ r set key $big_str
+ r debug sleep 2 ;# wait for replica reconnecting
+ r exec
+            # When the replica reconnects with the master, the master accepts the
+            # partial resync and doesn't close the replica client, even though the
+            # client output buffer limit is reached.
+ r set key $big_str ;# trigger output buffer limit check
+ wait_for_ofs_sync r $replica
+ # master accepted replica partial resync
+ assert_equal [s sync_full] {1}
+ assert_equal [s sync_partial_ok] {1}
+
+ r multi
+ r set key $big_str
+ r set key $big_str
+ r exec
+            # the replica's reply buffer size is more than client-output-buffer-limit but
+            # doesn't exceed repl-backlog-size, so we don't close the replica client.
+ wait_for_condition 1000 100 {
+ [s -1 master_repl_offset] eq [s master_repl_offset]
+ } else {
+ fail "Replica offset didn't catch up with the master after too long time"
+ }
+ assert_equal [s sync_full] {1}
+ assert_equal [s sync_partial_ok] {1}
+ }
+ }
+}
+
+# This test was added to make sure big keys added to the backlog do not trigger a psync loop.
+test {Replica client-output-buffer size is limited to backlog_limit/16 when no replication data is pending} {
+ proc client_field {r type f} {
+ set client [$r client list type $type]
+ if {![regexp $f=(\[a-zA-Z0-9-\]+) $client - res]} {
+            error "field $f not found in $client"
+ }
+ return $res
+ }
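+    # e.g. [client_field $master replica tot-mem] extracts the tot-mem field from
+    # the matching CLIENT LIST TYPE replica line, as used at the end of this test.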
+
+ start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ set replica [srv -1 client]
+ set replica_host [srv -1 host]
+ set replica_port [srv -1 port]
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ $master config set maxmemory-policy allkeys-lru
+
+ $master config set repl-backlog-size 16384
+ $master config set client-output-buffer-limit "replica 32768 32768 60"
+            # Key has to be larger than the replica client-output-buffer limit.
+ set keysize [expr 256*1024]
+
+ $replica replicaof $master_host $master_port
+ wait_for_condition 50 100 {
+ [lindex [$replica role] 0] eq {slave} &&
+ [string match {*master_link_status:up*} [$replica info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+
+            # Write a big key that is going to breach the obuf limit and cause the replica to
+            # disconnect. Then, in the same event loop, add at least 16 more keys and enable
+            # eviction, so that the eviction code has a chance to call flushSlavesOutputBuffers,
+            # and finally run PING to trigger the eviction code.
+ set _v [prepare_value $keysize]
+ $master write "[format_command mset key $_v k1 1 k2 2 k3 3 k4 4 k5 5 k6 6 k7 7 k8 8 k9 9 ka a kb b kc c kd d ke e kf f kg g kh h]config set maxmemory 1\r\nping\r\n"
+ $master flush
+ $master read
+ $master read
+ $master read
+ wait_for_ofs_sync $master $replica
+
+ # Write another key to force the test to wait for another event loop iteration so that we
+ # give the serverCron a chance to disconnect replicas with COB size exceeding the limits
+ $master config set maxmemory 0
+ $master set key1 1
+ wait_for_ofs_sync $master $replica
+
+ assert {[status $master connected_slaves] == 1}
+
+ wait_for_condition 50 100 {
+ [client_field $master replica tot-mem] < $keysize
+ } else {
+ fail "replica client-output-buffer usage is higher than expected."
+ }
+
+ # now we expect the replica to re-connect but fail partial sync (it doesn't have large
+ # enough COB limit and must result in a full-sync)
+ assert {[status $master sync_partial_ok] == 0}
+
+ # Before this fix (#11905), the test would trigger an assertion in 'o->used >= c->ref_block_pos'
+ test {The update of replBufBlock's repl_offset is ok - Regression test for #11666} {
+ set rd [redis_deferring_client]
+ set replid [status $master master_replid]
+ set offset [status $master repl_backlog_first_byte_offset]
+ $rd psync $replid $offset
+ assert_equal {PONG} [$master ping] ;# Make sure the master doesn't crash.
+ $rd close
+ }
+ }
+ }
+}
+
diff --git a/tests/integration/replication-psync.tcl b/tests/integration/replication-psync.tcl
new file mode 100644
index 0000000..dc1df0f
--- /dev/null
+++ b/tests/integration/replication-psync.tcl
@@ -0,0 +1,143 @@
+# Creates a master-slave pair and breaks the link continuously to force
+# partial resync attempts, all this while flooding the master with
+# write queries.
+#
+# You can specify backlog size, ttl, delay before reconnection, test duration
+# in seconds, and an additional condition to verify at the end.
+#
+# If reconnect is > 0, the test actually tries to break the connection and
+# reconnect with the master, otherwise just the initial synchronization is
+# checked for consistency.
+proc test_psync {descr duration backlog_size backlog_ttl delay cond mdl sdl reconnect} {
+ start_server {tags {"repl"} overrides {save {}}} {
+ start_server {overrides {save {}}} {
+
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ $master config set repl-backlog-size $backlog_size
+ $master config set repl-backlog-ttl $backlog_ttl
+ $master config set repl-diskless-sync $mdl
+ $master config set repl-diskless-sync-delay 1
+ $slave config set repl-diskless-load $sdl
+
+ set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000]
+ set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000]
+ set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000]
+
+ test {Slave should be able to synchronize with the master} {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [lindex [r role] 0] eq {slave} &&
+ [lindex [r role] 3] eq {connected}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ # Check that the background clients are actually writing.
+ test {Detect write load to master} {
+ wait_for_condition 50 1000 {
+ [$master dbsize] > 100
+ } else {
+ fail "Can't detect write load from background clients."
+ }
+ }
+
+ test "Test replication partial resync: $descr (diskless: $mdl, $sdl, reconnect: $reconnect)" {
+ # Now while the clients are writing data, break the master-slave
+ # link multiple times.
+ if ($reconnect) {
+ for {set j 0} {$j < $duration*10} {incr j} {
+ after 100
+ # catch {puts "MASTER [$master dbsize] keys, REPLICA [$slave dbsize] keys"}
+
+ if {($j % 20) == 0} {
+ catch {
+ if {$delay} {
+ $slave multi
+ $slave client kill $master_host:$master_port
+ $slave debug sleep $delay
+ $slave exec
+ } else {
+ $slave client kill $master_host:$master_port
+ }
+ }
+ }
+ }
+ }
+ stop_bg_complex_data $load_handle0
+ stop_bg_complex_data $load_handle1
+ stop_bg_complex_data $load_handle2
+
+ # Wait for the slave to reach the "online"
+ # state from the POV of the master.
+ set retry 5000
+ while {$retry} {
+ set info [$master info]
+ if {[string match {*slave0:*state=online*} $info]} {
+ break
+ } else {
+ incr retry -1
+ after 100
+ }
+ }
+ if {$retry == 0} {
+ error "assertion:Slave not correctly synchronized"
+ }
+
+ # Wait until the slave acknowledges it is online so
+ # we are sure that DBSIZE and DEBUG DIGEST will not
+ # fail because of timing issues (-LOADING error).
+ wait_for_condition 5000 100 {
+ [lindex [$slave role] 3] eq {connected}
+ } else {
+ fail "Slave still not connected after some time"
+ }
+
+ wait_for_condition 100 100 {
+ [$master debug digest] == [$slave debug digest]
+ } else {
+ set csv1 [csvdump r]
+ set csv2 [csvdump {r -1}]
+ set fd [open /tmp/repldump1.txt w]
+ puts -nonewline $fd $csv1
+ close $fd
+ set fd [open /tmp/repldump2.txt w]
+ puts -nonewline $fd $csv2
+ close $fd
+ fail "Master - Replica inconsistency, Run diff -u against /tmp/repldump*.txt for more info"
+ }
+ assert {[$master dbsize] > 0}
+ eval $cond
+ }
+ }
+ }
+}
+
+tags {"external:skip"} {
+foreach mdl {no yes} {
+ foreach sdl {disabled swapdb} {
+ test_psync {no reconnection, just sync} 6 1000000 3600 0 {
+ } $mdl $sdl 0
+
+ test_psync {ok psync} 6 100000000 3600 0 {
+ assert {[s -1 sync_partial_ok] > 0}
+ } $mdl $sdl 1
+
+ test_psync {no backlog} 6 100 3600 0.5 {
+ assert {[s -1 sync_partial_err] > 0}
+ } $mdl $sdl 1
+
+ test_psync {ok after delay} 3 100000000 3600 3 {
+ assert {[s -1 sync_partial_ok] > 0}
+ } $mdl $sdl 1
+
+ test_psync {backlog expired} 3 100000000 1 3 {
+ assert {[s -1 sync_partial_err] > 0}
+ } $mdl $sdl 1
+ }
+}
+}
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
new file mode 100644
index 0000000..de4d527
--- /dev/null
+++ b/tests/integration/replication.tcl
@@ -0,0 +1,1456 @@
+proc log_file_matches {log pattern} {
+ set fp [open $log r]
+ set content [read $fp]
+ close $fp
+ string match $pattern $content
+}
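+
+# For example (a sketch), this returns 1 when the glob pattern appears
+# anywhere in a server's stdout log:
+#
+#   log_file_matches [srv 0 stdout] "*Timeout connecting to the MASTER*"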
+
+start_server {tags {"repl network external:skip"}} {
+ set slave [srv 0 client]
+ set slave_host [srv 0 host]
+ set slave_port [srv 0 port]
+ set slave_log [srv 0 stdout]
+ start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ # Configure the master in order to hang waiting for the BGSAVE
+ # operation, so that the slave remains in the handshake state.
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 1000
+
+ # Start the replication process...
+ $slave slaveof $master_host $master_port
+
+ test {Slave enters handshake} {
+ wait_for_condition 50 1000 {
+ [string match *handshake* [$slave role]]
+ } else {
+ fail "Replica does not enter handshake state"
+ }
+ }
+
+ test {Slave enters wait_bgsave} {
+ wait_for_condition 50 1000 {
+ [string match *state=wait_bgsave* [$master info replication]]
+ } else {
+ fail "Replica does not enter wait_bgsave state"
+ }
+ }
+
+ # Use a short replication timeout on the slave, so that if there
+ # are no bugs the timeout is triggered in a reasonable amount
+ # of time.
+ $slave config set repl-timeout 5
+
+ # But make the master unable to send
+ # the periodic newlines to refresh the connection. The slave
+ # should detect the timeout.
+ $master debug sleep 10
+
+ test {Slave is able to detect timeout during handshake} {
+ wait_for_condition 50 1000 {
+ [log_file_matches $slave_log "*Timeout connecting to the MASTER*"]
+ } else {
+ fail "Replica is not able to detect timeout"
+ }
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ set A [srv 0 client]
+ set A_host [srv 0 host]
+ set A_port [srv 0 port]
+ start_server {} {
+ set B [srv 0 client]
+ set B_host [srv 0 host]
+ set B_port [srv 0 port]
+
+ test {Set instance A as slave of B} {
+ $A slaveof $B_host $B_port
+ wait_for_condition 50 100 {
+ [lindex [$A role] 0] eq {slave} &&
+ [string match {*master_link_status:up*} [$A info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+
+ test {INCRBYFLOAT replication, should not remove expire} {
+ r set test 1 EX 100
+ r incrbyfloat test 0.1
+ wait_for_ofs_sync $A $B
+ assert_equal [$A debug digest] [$B debug digest]
+ }
+
+ test {GETSET replication} {
+ $A config resetstat
+ $A config set loglevel debug
+ $B config set loglevel debug
+ r set test foo
+ assert_equal [r getset test bar] foo
+ wait_for_condition 500 10 {
+ [$A get test] eq "bar"
+ } else {
+ fail "getset wasn't propagated"
+ }
+ assert_equal [r set test vaz get] bar
+ wait_for_condition 500 10 {
+ [$A get test] eq "vaz"
+ } else {
+ fail "set get wasn't propagated"
+ }
+ assert_match {*calls=3,*} [cmdrstat set $A]
+ assert_match {} [cmdrstat getset $A]
+ }
+
+ test {BRPOPLPUSH replication, when blocking against empty list} {
+ $A config resetstat
+ set rd [redis_deferring_client]
+ $rd brpoplpush a b 5
+ r lpush a foo
+ wait_for_condition 50 100 {
+ [$A debug digest] eq [$B debug digest]
+ } else {
+ fail "Master and replica have different digest: [$A debug digest] VS [$B debug digest]"
+ }
+ assert_match {*calls=1,*} [cmdrstat rpoplpush $A]
+ assert_match {} [cmdrstat lmove $A]
+ }
+
+ test {BRPOPLPUSH replication, list exists} {
+ $A config resetstat
+ set rd [redis_deferring_client]
+ r lpush c 1
+ r lpush c 2
+ r lpush c 3
+ $rd brpoplpush c d 5
+ after 1000
+ assert_equal [$A debug digest] [$B debug digest]
+ assert_match {*calls=1,*} [cmdrstat rpoplpush $A]
+ assert_match {} [cmdrstat lmove $A]
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "BLMOVE ($wherefrom, $whereto) replication, when blocking against empty list" {
+ $A config resetstat
+ set rd [redis_deferring_client]
+ $rd blmove a b $wherefrom $whereto 5
+ r lpush a foo
+ wait_for_condition 50 100 {
+ [$A debug digest] eq [$B debug digest]
+ } else {
+ fail "Master and replica have different digest: [$A debug digest] VS [$B debug digest]"
+ }
+ assert_match {*calls=1,*} [cmdrstat lmove $A]
+ assert_match {} [cmdrstat rpoplpush $A]
+ }
+
+ test "BLMOVE ($wherefrom, $whereto) replication, list exists" {
+ $A config resetstat
+ set rd [redis_deferring_client]
+ r lpush c 1
+ r lpush c 2
+ r lpush c 3
+ $rd blmove c d $wherefrom $whereto 5
+ after 1000
+ assert_equal [$A debug digest] [$B debug digest]
+ assert_match {*calls=1,*} [cmdrstat lmove $A]
+ assert_match {} [cmdrstat rpoplpush $A]
+ }
+ }
+ }
+
+ test {BLPOP followed by role change, issue #2473} {
+ set rd [redis_deferring_client]
+ $rd blpop foo 0 ; # Block while B is a master
+
+ # Turn B into master of A
+ $A slaveof no one
+ $B slaveof $A_host $A_port
+ wait_for_condition 50 100 {
+ [lindex [$B role] 0] eq {slave} &&
+ [string match {*master_link_status:up*} [$B info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+
+ # Push elements into the "foo" list of the new replica.
+ # If the client is still attached to the instance, we'll get
+ # a desync between the two instances.
+ $A rpush foo a b c
+ after 100
+
+ wait_for_condition 50 100 {
+ [$A debug digest] eq [$B debug digest] &&
+ [$A lrange foo 0 -1] eq {a b c} &&
+ [$B lrange foo 0 -1] eq {a b c}
+ } else {
+ fail "Master and replica have different digest: [$A debug digest] VS [$B debug digest]"
+ }
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1*} [cmdrstat blpop $B]
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ r set mykey foo
+
+ start_server {} {
+ test {Second server should have role master at first} {
+ s role
+ } {master}
+
+ test {SLAVEOF should start with link status "down"} {
+ r multi
+ r slaveof [srv -1 host] [srv -1 port]
+ r info replication
+ r exec
+ } {*master_link_status:down*}
+
+ test {The role should immediately be changed to "replica"} {
+ s role
+ } {slave}
+
+ wait_for_sync r
+ test {Sync should have transferred keys from master} {
+ r get mykey
+ } {foo}
+
+ test {The link status should be up} {
+ s master_link_status
+ } {up}
+
+ test {SET on the master should immediately propagate} {
+ r -1 set mykey bar
+
+ wait_for_condition 500 100 {
+ [r 0 get mykey] eq {bar}
+ } else {
+ fail "SET on master did not propagated on replica"
+ }
+ }
+
+ test {FLUSHDB / FLUSHALL should replicate} {
+ # we're attaching to a sub-replica, so we need to stop pings on the real master
+ r -1 config set repl-ping-replica-period 3600
+
+ set repl [attach_to_replication_stream]
+
+ r -1 set key value
+ r -1 flushdb
+
+ r -1 set key value2
+ r -1 flushall
+
+ wait_for_ofs_sync [srv 0 client] [srv -1 client]
+ assert_equal [r -1 dbsize] 0
+ assert_equal [r 0 dbsize] 0
+
+ # DB is empty.
+ r -1 flushdb
+ r -1 flushdb
+ r -1 eval {redis.call("flushdb")} 0
+
+ # DBs are empty.
+ r -1 flushall
+ r -1 flushall
+ r -1 eval {redis.call("flushall")} 0
+
+ # add another command to check nothing else was propagated after the above
+ r -1 incr x
+
+ # Assert that each FLUSHDB command is replicated even if the DB is empty.
+ # Assert that each FLUSHALL command is replicated even if the DBs are empty.
+ assert_replication_stream $repl {
+ {set key value}
+ {flushdb}
+ {set key value2}
+ {flushall}
+ {flushdb}
+ {flushdb}
+ {flushdb}
+ {flushall}
+ {flushall}
+ {flushall}
+ {incr x}
+ }
+ close_replication_stream $repl
+ }
+
+ test {ROLE in master reports master with a slave} {
+ set res [r -1 role]
+ lassign $res role offset slaves
+ assert {$role eq {master}}
+ assert {$offset > 0}
+ assert {[llength $slaves] == 1}
+ lassign [lindex $slaves 0] master_host master_port slave_offset
+ assert {$slave_offset <= $offset}
+ }
+
+ test {ROLE in slave reports slave in connected state} {
+ set res [r role]
+ lassign $res role master_host master_port slave_state slave_offset
+ assert {$role eq {slave}}
+ assert {$slave_state eq {connected}}
+ }
+ }
+}
+
+foreach mdl {no yes} {
+ foreach sdl {disabled swapdb} {
+ start_server {tags {"repl external:skip"} overrides {save {}}} {
+ set master [srv 0 client]
+ $master config set repl-diskless-sync $mdl
+ $master config set repl-diskless-sync-delay 5
+ $master config set repl-diskless-sync-max-replicas 3
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set slaves {}
+ start_server {overrides {save {}}} {
+ lappend slaves [srv 0 client]
+ start_server {overrides {save {}}} {
+ lappend slaves [srv 0 client]
+ start_server {overrides {save {}}} {
+ lappend slaves [srv 0 client]
+ test "Connect multiple replicas at the same time (issue #141), master diskless=$mdl, replica diskless=$sdl" {
+ # start load handles only inside the test, so that the test can be skipped
+ set load_handle0 [start_bg_complex_data $master_host $master_port 9 100000000]
+ set load_handle1 [start_bg_complex_data $master_host $master_port 11 100000000]
+ set load_handle2 [start_bg_complex_data $master_host $master_port 12 100000000]
+ set load_handle3 [start_write_load $master_host $master_port 8]
+ set load_handle4 [start_write_load $master_host $master_port 4]
+ after 5000 ;# wait for some data to accumulate so that we have RDB part for the fork
+
+ # Send SLAVEOF commands to slaves
+ [lindex $slaves 0] config set repl-diskless-load $sdl
+ [lindex $slaves 1] config set repl-diskless-load $sdl
+ [lindex $slaves 2] config set repl-diskless-load $sdl
+ [lindex $slaves 0] slaveof $master_host $master_port
+ [lindex $slaves 1] slaveof $master_host $master_port
+ [lindex $slaves 2] slaveof $master_host $master_port
+
+ # Wait for all three slaves to reach the "online"
+ # state from the POV of the master.
+ set retry 500
+ while {$retry} {
+ set info [r -3 info]
+ if {[string match {*slave0:*state=online*slave1:*state=online*slave2:*state=online*} $info]} {
+ break
+ } else {
+ incr retry -1
+ after 100
+ }
+ }
+ if {$retry == 0} {
+ error "assertion:Slaves not correctly synchronized"
+ }
+
+ # Wait until the slaves acknowledge they are online so
+ # we are sure that DBSIZE and DEBUG DIGEST will not
+ # fail because of timing issues.
+ wait_for_condition 500 100 {
+ [lindex [[lindex $slaves 0] role] 3] eq {connected} &&
+ [lindex [[lindex $slaves 1] role] 3] eq {connected} &&
+ [lindex [[lindex $slaves 2] role] 3] eq {connected}
+ } else {
+ fail "Slaves still not connected after some time"
+ }
+
+ # Stop the write load
+ stop_bg_complex_data $load_handle0
+ stop_bg_complex_data $load_handle1
+ stop_bg_complex_data $load_handle2
+ stop_write_load $load_handle3
+ stop_write_load $load_handle4
+
+ # Make sure no more commands are processed
+ wait_load_handlers_disconnected -3
+
+ wait_for_ofs_sync $master [lindex $slaves 0]
+ wait_for_ofs_sync $master [lindex $slaves 1]
+ wait_for_ofs_sync $master [lindex $slaves 2]
+
+ # Check digests
+ set digest [$master debug digest]
+ set digest0 [[lindex $slaves 0] debug digest]
+ set digest1 [[lindex $slaves 1] debug digest]
+ set digest2 [[lindex $slaves 2] debug digest]
+ assert {$digest ne 0000000000000000000000000000000000000000}
+ assert {$digest eq $digest0}
+ assert {$digest eq $digest1}
+ assert {$digest eq $digest2}
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+start_server {tags {"repl external:skip"} overrides {save {}}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ start_server {overrides {save {}}} {
+ test "Master stream is correctly processed while the replica has a script in -BUSY state" {
+ set load_handle0 [start_write_load $master_host $master_port 3]
+ set slave [srv 0 client]
+ $slave config set lua-time-limit 500
+ $slave slaveof $master_host $master_port
+
+ # Wait for the slave to be online
+ wait_for_condition 500 100 {
+ [lindex [$slave role] 3] eq {connected}
+ } else {
+ fail "Replica still not connected after some time"
+ }
+
+ # Wait some time to make sure the master is sending data
+ # to the slave.
+ after 5000
+
+ # Stop the ability of the slave to process data by sending
+ # a script that will put it in the BUSY state.
+ $slave eval {for i=1,3000000000 do end} 0
+
+ # Wait some time again so that more of the master stream
+ # is processed.
+ after 2000
+
+ # Stop the write load
+ stop_write_load $load_handle0
+
+ # Check that master and replica converge to the same dataset digest
+ wait_for_condition 500 100 {
+ [$master debug digest] eq [$slave debug digest]
+ } else {
+ fail "Different datasets between replica and master"
+ }
+ }
+ }
+}
+
+# Diskless load swapdb when NOT async_loading (different master replid)
+foreach testType {Successful Aborted} {
+ start_server {tags {"repl external:skip"}} {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ set replica_log [srv 0 stdout]
+ start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ # Set master and replica to use diskless replication in swapdb mode
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ $master config set save ""
+ $replica config set repl-diskless-load swapdb
+ $replica config set save ""
+
+ # Put different data sets on the master and replica
+ # We need to put large keys on the master since the replica replies to info only once per 2mb
+ $replica debug populate 200 slave 10
+ $master debug populate 1000 master 100000
+ $master config set rdbcompression no
+
+ # Set a key value on replica to check status on failure and after swapping db
+ $replica set mykey myvalue
+
+ switch $testType {
+ "Aborted" {
+ # Set master with a slow rdb generation, so that we can easily intercept loading
+ # 10ms per key; with 1000 keys that is 10 seconds
+ $master config set rdb-key-save-delay 10000
+
+ # Start the replication process
+ $replica replicaof $master_host $master_port
+
+ test {Diskless load swapdb (different replid): replica enters loading} {
+ # Wait for the replica to start reading the rdb
+ wait_for_condition 100 100 {
+ [s -1 loading] eq 1
+ } else {
+ fail "Replica didn't get into loading mode"
+ }
+
+ assert_equal [s -1 async_loading] 0
+ }
+
+ # Make sure that next sync will not start immediately so that we can catch the replica in between syncs
+ $master config set repl-diskless-sync-delay 5
+
+ # Kill the replica connection on the master
+ set killed [$master client kill type replica]
+
+ # Wait for loading to stop (fail)
+ wait_for_condition 100 100 {
+ [s -1 loading] eq 0
+ } else {
+ fail "Replica didn't disconnect"
+ }
+
+ test {Diskless load swapdb (different replid): old database is exposed after replication fails} {
+ # Ensure we see old values from replica
+ assert_equal [$replica get mykey] "myvalue"
+
+ # Make sure amount of replica keys didn't change
+ assert_equal [$replica dbsize] 201
+ }
+
+ # Speed up shutdown
+ $master config set rdb-key-save-delay 0
+ }
+ "Successful" {
+ # Start the replication process
+ $replica replicaof $master_host $master_port
+
+ # Let replica finish sync with master
+ wait_for_condition 100 100 {
+ [s -1 master_link_status] eq "up"
+ } else {
+ fail "Master <-> Replica didn't finish sync"
+ }
+
+ test {Diskless load swapdb (different replid): new database is exposed after swapping} {
+ # Ensure we no longer see the key that was stored only on the replica, and that we don't get a LOADING status
+ assert_equal [$replica GET mykey] ""
+
+ # Make sure amount of keys matches master
+ assert_equal [$replica dbsize] 1000
+ }
+ }
+ }
+ }
+ }
+}
+
+# Diskless load swapdb when async_loading (matching master replid)
+foreach testType {Successful Aborted} {
+ start_server {tags {"repl external:skip"}} {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ set replica_log [srv 0 stdout]
+ start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ # Set master and replica to use diskless replication in swapdb mode
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ $master config set save ""
+ $replica config set repl-diskless-load swapdb
+ $replica config set save ""
+
+ # Set replica writable so we can check that a key we manually added is served
+ # during replication and after failure, but disappears on success
+ $replica config set replica-read-only no
+
+ # Initial sync to have matching replids between master and replica
+ $replica replicaof $master_host $master_port
+
+ # Let replica finish initial sync with master
+ wait_for_condition 100 100 {
+ [s -1 master_link_status] eq "up"
+ } else {
+ fail "Master <-> Replica didn't finish sync"
+ }
+
+ # Put different data sets on the master and replica
+ # We need to put large keys on the master since the replica replies to info only once per 2mb
+ $replica debug populate 2000 slave 10
+ $master debug populate 2000 master 100000
+ $master config set rdbcompression no
+
+ # Set a key value on replica to check status during loading, on failure and after swapping db
+ $replica set mykey myvalue
+
+ # Set a function value on replica to check status during loading, on failure and after swapping db
+ $replica function load {#!lua name=test
+ redis.register_function('test', function() return 'hello1' end)
+ }
+
+ # Set a function value on master to check it reaches the replica when replication ends
+ $master function load {#!lua name=test
+ redis.register_function('test', function() return 'hello2' end)
+ }
+
+ # Remember the sync_full stat before the client kill.
+ set sync_full [s 0 sync_full]
+
+ if {$testType == "Aborted"} {
+ # Set master with a slow rdb generation, so that we can easily intercept loading
+ # 10ms per key; with 2000 keys that is 20 seconds
+ $master config set rdb-key-save-delay 10000
+ }
+
+ # Force the replica to try another full sync (this time it will have matching master replid)
+ $master multi
+ $master client kill type replica
+ # Fill replication backlog with new content
+ $master config set repl-backlog-size 16384
+ for {set keyid 0} {$keyid < 10} {incr keyid} {
+ $master set "$keyid string_$keyid" [string repeat A 16384]
+ }
+ $master exec
+
+ # Wait for sync_full to get incremented from the previous value.
+ # After the client kill, make sure we do a reconnect, and do a FULL SYNC.
+ wait_for_condition 100 100 {
+ [s 0 sync_full] > $sync_full
+ } else {
+ fail "Master <-> Replica didn't start the full sync"
+ }
+
+ switch $testType {
+ "Aborted" {
+ test {Diskless load swapdb (async_loading): replica enters async_loading} {
+ # Wait for the replica to start reading the rdb
+ wait_for_condition 100 100 {
+ [s -1 async_loading] eq 1
+ } else {
+ fail "Replica didn't get into async_loading mode"
+ }
+
+ assert_equal [s -1 loading] 0
+ }
+
+ test {Diskless load swapdb (async_loading): old database is exposed while async replication is in progress} {
+ # Ensure we still see old values while async_loading is in progress, and that we don't get a LOADING status
+ assert_equal [$replica get mykey] "myvalue"
+
+ # Ensure we still can call old function while async_loading is in progress
+ assert_equal [$replica fcall test 0] "hello1"
+
+ # Make sure we're still async_loading to validate previous assertion
+ assert_equal [s -1 async_loading] 1
+
+ # Make sure amount of replica keys didn't change
+ assert_equal [$replica dbsize] 2001
+ }
+
+ test {Busy script during async loading} {
+ set rd_replica [redis_deferring_client -1]
+ $replica config set lua-time-limit 10
+ $rd_replica eval {while true do end} 0
+ after 200
+ assert_error {BUSY*} {$replica ping}
+ $replica script kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ assert_equal [$replica ping] "PONG"
+ $rd_replica close
+ }
+
+ test {Blocked commands and configs during async-loading} {
+ assert_error {LOADING*} {$replica config set appendonly no}
+ assert_error {LOADING*} {$replica REPLICAOF no one}
+ }
+
+ # Make sure that next sync will not start immediately so that we can catch the replica in between syncs
+ $master config set repl-diskless-sync-delay 5
+
+ # Kill the replica connection on the master
+ set killed [$master client kill type replica]
+
+ # Wait for loading to stop (fail)
+ wait_for_condition 100 100 {
+ [s -1 async_loading] eq 0
+ } else {
+ fail "Replica didn't disconnect"
+ }
+
+ test {Diskless load swapdb (async_loading): old database is exposed after async replication fails} {
+ # Ensure we see old values from replica
+ assert_equal [$replica get mykey] "myvalue"
+
+ # Ensure we still can call old function
+ assert_equal [$replica fcall test 0] "hello1"
+
+ # Make sure amount of replica keys didn't change
+ assert_equal [$replica dbsize] 2001
+ }
+
+ # Speed up shutdown
+ $master config set rdb-key-save-delay 0
+ }
+ "Successful" {
+ # Let replica finish sync with master
+ wait_for_condition 100 100 {
+ [s -1 master_link_status] eq "up"
+ } else {
+ fail "Master <-> Replica didn't finish sync"
+ }
+
+ test {Diskless load swapdb (async_loading): new database is exposed after swapping} {
+ # Ensure we no longer see the key that was stored only on the replica, and that we don't get a LOADING status
+ assert_equal [$replica GET mykey] ""
+
+ # Ensure we got the new function
+ assert_equal [$replica fcall test 0] "hello2"
+
+ # Make sure amount of keys matches master
+ assert_equal [$replica dbsize] 2010
+ }
+ }
+ }
+ }
+ }
+}
+
+test {diskless loading short read} {
+ start_server {tags {"repl"} overrides {save ""}} {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ start_server {overrides {save ""}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ # Set master and replica to use diskless replication
+ $master config set repl-diskless-sync yes
+ $master config set rdbcompression no
+ $replica config set repl-diskless-load swapdb
+ $master config set hz 500
+ $replica config set hz 500
+ $master config set dynamic-hz no
+ $replica config set dynamic-hz no
+ # Try to fill the master with all data types / encodings
+ set start [clock clicks -milliseconds]
+
+ # Set a function value to check short read handling on functions
+ r function load {#!lua name=test
+ redis.register_function('test', function() return 'hello1' end)
+ }
+
+ for {set k 0} {$k < 3} {incr k} {
+ for {set i 0} {$i < 10} {incr i} {
+ r set "$k int_$i" [expr {int(rand()*10000)}]
+ r expire "$k int_$i" [expr {int(rand()*10000)}]
+ r set "$k string_$i" [string repeat A [expr {int(rand()*1000000)}]]
+ r hset "$k hash_small" [string repeat A [expr {int(rand()*10)}]] 0[string repeat A [expr {int(rand()*10)}]]
+ r hset "$k hash_large" [string repeat A [expr {int(rand()*10000)}]] [string repeat A [expr {int(rand()*1000000)}]]
+ r sadd "$k set_small" [string repeat A [expr {int(rand()*10)}]]
+ r sadd "$k set_large" [string repeat A [expr {int(rand()*1000000)}]]
+ r zadd "$k zset_small" [expr {rand()}] [string repeat A [expr {int(rand()*10)}]]
+ r zadd "$k zset_large" [expr {rand()}] [string repeat A [expr {int(rand()*1000000)}]]
+ r lpush "$k list_small" [string repeat A [expr {int(rand()*10)}]]
+ r lpush "$k list_large" [string repeat A [expr {int(rand()*1000000)}]]
+ for {set j 0} {$j < 10} {incr j} {
+ r xadd "$k stream" * foo "asdf" bar "1234"
+ }
+ r xgroup create "$k stream" "mygroup_$i" 0
+ r xreadgroup GROUP "mygroup_$i" Alice COUNT 1 STREAMS "$k stream" >
+ }
+ }
+
+ if {$::verbose} {
+ set end [clock clicks -milliseconds]
+ set duration [expr $end - $start]
+ puts "filling took $duration ms (TODO: use pipeline)"
+ set start [clock clicks -milliseconds]
+ }
+
+ # Start the replication process...
+ set loglines [count_log_lines -1]
+ $master config set repl-diskless-sync-delay 0
+ $replica replicaof $master_host $master_port
+
+ # kill the replication at various points
+ set attempts 100
+ if {$::accurate} { set attempts 500 }
+ for {set i 0} {$i < $attempts} {incr i} {
+ # wait for the replica to start reading the rdb
+ # using the log file since the replica only responds to INFO once per 2mb
+ set res [wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 2000 1]
+ set loglines [lindex $res 1]
+
+ # add some additional random sleep so that we kill the replication at a different point each time
+ after [expr {int(rand()*50)}]
+
+ # kill the replica connection on the master
+ set killed [$master client kill type replica]
+
+ set res [wait_for_log_messages -1 {"*Internal error in RDB*" "*Finished with success*" "*Successful partial resynchronization*"} $loglines 500 10]
+ if {$::verbose} { puts $res }
+ set log_text [lindex $res 0]
+ set loglines [lindex $res 1]
+ if {![string match "*Internal error in RDB*" $log_text]} {
+ # force the replica to try another full sync
+ $master multi
+ $master client kill type replica
+ $master set asdf asdf
+ # fill replication backlog with new content
+ $master config set repl-backlog-size 16384
+ for {set keyid 0} {$keyid < 10} {incr keyid} {
+ $master set "$keyid string_$keyid" [string repeat A 16384]
+ }
+ $master exec
+ }
+
+ # wait for loading to stop (fail)
+ # After a successful load, the next loop iteration will enter `async_loading`
+ wait_for_condition 1000 1 {
+ [s -1 async_loading] eq 0 &&
+ [s -1 loading] eq 0
+ } else {
+ fail "Replica didn't disconnect"
+ }
+ }
+ if {$::verbose} {
+ set end [clock clicks -milliseconds]
+ set duration [expr $end - $start]
+ puts "test took $duration ms"
+ }
+ # enable fast shutdown
+ $master config set rdb-key-save-delay 0
+ }
+ }
+} {} {external:skip}
+
+# get current stime and utime metrics for a thread (since its creation)
+proc get_cpu_metrics { statfile } {
+ if { [ catch {
+ set fid [ open $statfile r ]
+ set data [ read $fid 1024 ]
+ ::close $fid
+ set data [ split $data ]
+
+ ;## number of jiffies it has been scheduled...
+ set utime [ lindex $data 13 ]
+ set stime [ lindex $data 14 ]
+ } err ] } {
+ error "assertion:can't parse /proc: $err"
+ }
+ set mstime [clock milliseconds]
+ return [ list $mstime $utime $stime ]
+}
+
+# compute %utime and %stime of a thread between two measurements
+proc compute_cpu_usage {start end} {
+ set clock_ticks [exec getconf CLK_TCK]
+ # convert ms time to jiffies and calc delta
+ set dtime [ expr { ([lindex $end 0] - [lindex $start 0]) * double($clock_ticks) / 1000 } ]
+ set utime [ expr { [lindex $end 1] - [lindex $start 1] } ]
+ set stime [ expr { [lindex $end 2] - [lindex $start 2] } ]
+ set pucpu [ expr { ($utime / $dtime) * 100 } ]
+ set pscpu [ expr { ($stime / $dtime) * 100 } ]
+ return [ list $pucpu $pscpu ]
+}
+
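+# Example (a sketch; $pid is a hypothetical process id): sample a stat file
+# twice and compute the CPU percentages over the interval:
+#
+#   set m1 [get_cpu_metrics "/proc/$pid/stat"]
+#   after 1000
+#   set m2 [get_cpu_metrics "/proc/$pid/stat"]
+#   lassign [compute_cpu_usage $m1 $m2] pucpu pscpu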
+
+# test diskless rdb pipe with multiple replicas, which may drop half way
+start_server {tags {"repl external:skip"} overrides {save ""}} {
+ set master [srv 0 client]
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 5
+ $master config set repl-diskless-sync-max-replicas 2
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set master_pid [srv 0 pid]
+ # put enough data in the db that the rdb file will be bigger than the socket buffers
+ # and since we'll have key-load-delay of 100, 20000 keys will take at least 2 seconds
+ # we also need the replica to process requests during transfer (which it does only once per 2mb)
+ $master debug populate 20000 test 10000
+ $master config set rdbcompression no
+ # If running on Linux, we also measure utime/stime to detect possible I/O handling issues
+ # note: catch stores the command output (or the error message) in $os
+ if {[catch {exec uname} os]} { set os "unknown" }
+ set measure_time [expr {$os eq "Linux" ? 1 : 0}]
+ foreach all_drop {no slow fast all timeout} {
+ test "diskless $all_drop replicas drop during rdb pipe" {
+ set replicas {}
+ set replicas_alive {}
+ # start one replica that will read the rdb fast, and one that will be slow
+ start_server {overrides {save ""}} {
+ lappend replicas [srv 0 client]
+ lappend replicas_alive [srv 0 client]
+ start_server {overrides {save ""}} {
+ lappend replicas [srv 0 client]
+ lappend replicas_alive [srv 0 client]
+
+ # start replication
+ # it's enough for just one replica to be slow, and have its write handler enabled
+ # so that the whole rdb generation process is bound to that
+ set loglines [count_log_lines -2]
+ [lindex $replicas 0] config set repl-diskless-load swapdb
+ [lindex $replicas 0] config set key-load-delay 100 ;# 20k keys and 100 microseconds sleep means at least 2 seconds
+ [lindex $replicas 0] replicaof $master_host $master_port
+ [lindex $replicas 1] replicaof $master_host $master_port
+
+ # wait for the replicas to start reading the rdb
+ # using the log file since the replica only responds to INFO once per 2mb
+ wait_for_log_messages -1 {"*Loading DB in memory*"} 0 1500 10
+
+ if {$measure_time} {
+ set master_statfile "/proc/$master_pid/stat"
+ set master_start_metrics [get_cpu_metrics $master_statfile]
+ set start_time [clock seconds]
+ }
+
+ # wait a while so that the pipe socket writer will be
+ # blocked on write (since replica 0 is slow to read from the socket)
+ after 500
+
+ # add some command to be present in the command stream after the rdb.
+ $master incr $all_drop
+
+ # disconnect replicas depending on the current test
+ if {$all_drop == "all" || $all_drop == "fast"} {
+ exec kill [srv 0 pid]
+ set replicas_alive [lreplace $replicas_alive 1 1]
+ }
+ if {$all_drop == "all" || $all_drop == "slow"} {
+ exec kill [srv -1 pid]
+ set replicas_alive [lreplace $replicas_alive 0 0]
+ }
+ if {$all_drop == "timeout"} {
+ $master config set repl-timeout 2
+ # we want the slow replica to hang on a key for very long so it'll reach repl-timeout
+ pause_process [srv -1 pid]
+ after 2000
+ }
+
+ # wait for rdb child to exit
+ wait_for_condition 500 100 {
+ [s -2 rdb_bgsave_in_progress] == 0
+ } else {
+ fail "rdb child didn't terminate"
+ }
+
+ # make sure we got what we were aiming for, by looking for the message in the log file
+ if {$all_drop == "all"} {
+ wait_for_log_messages -2 {"*Diskless rdb transfer, last replica dropped, killing fork child*"} $loglines 1 1
+ }
+ if {$all_drop == "no"} {
+ wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 2 replicas still up*"} $loglines 1 1
+ }
+ if {$all_drop == "slow" || $all_drop == "fast"} {
+ wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 1 replicas still up*"} $loglines 1 1
+ }
+ if {$all_drop == "timeout"} {
+ wait_for_log_messages -2 {"*Disconnecting timedout replica (full sync)*"} $loglines 1 1
+ wait_for_log_messages -2 {"*Diskless rdb transfer, done reading from pipe, 1 replicas still up*"} $loglines 1 1
+ # master disconnected the slow replica, remove from array
+ set replicas_alive [lreplace $replicas_alive 0 0]
+ # release it
+ resume_process [srv -1 pid]
+ }
+
+ # make sure we don't have a busy loop going through epoll_wait
+ if {$measure_time} {
+ set master_end_metrics [get_cpu_metrics $master_statfile]
+ set time_elapsed [expr {[clock seconds]-$start_time}]
+ set master_cpu [compute_cpu_usage $master_start_metrics $master_end_metrics]
+ set master_utime [lindex $master_cpu 0]
+ set master_stime [lindex $master_cpu 1]
+ if {$::verbose} {
+ puts "elapsed: $time_elapsed"
+ puts "master utime: $master_utime"
+ puts "master stime: $master_stime"
+ }
+ if {!$::no_latency && ($all_drop == "all" || $all_drop == "slow" || $all_drop == "timeout")} {
+ assert {$master_utime < 70}
+ assert {$master_stime < 70}
+ }
+ if {!$::no_latency && ($all_drop == "none" || $all_drop == "fast")} {
+ assert {$master_utime < 15}
+ assert {$master_stime < 15}
+ }
+ }
+
+ # verify the data integrity
+ foreach replica $replicas_alive {
+ # Wait until replicas acknowledge they are online so
+ # we are sure that DBSIZE and DEBUG DIGEST will not
+ # fail because of timing issues.
+ wait_for_condition 150 100 {
+ [lindex [$replica role] 3] eq {connected}
+ } else {
+ fail "replicas still not connected after some time"
+ }
+
+ # Make sure that replicas and master have the same
+ # number of keys
+ wait_for_condition 50 100 {
+ [$master dbsize] == [$replica dbsize]
+ } else {
+ fail "Different number of keys between master and replicas after too long time."
+ }
+
+ # Check digests
+ set digest [$master debug digest]
+ set digest0 [$replica debug digest]
+ assert {$digest ne 0000000000000000000000000000000000000000}
+ assert {$digest eq $digest0}
+ }
+ }
+ }
+ }
+ }
+}
+
+test "diskless replication child being killed is collected" {
+ # when a diskless master is waiting for the replica to become writable
+ # it removes the read event from the rdb pipe, so if the child gets killed
+ # the replica will hang and the master may not collect the pid with waitpid
+ start_server {tags {"repl"} overrides {save ""}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set master_pid [srv 0 pid]
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ # put enough data in the db that the rdb file will be bigger than the socket buffers
+ $master debug populate 20000 test 10000
+ $master config set rdbcompression no
+ start_server {overrides {save ""}} {
+ set replica [srv 0 client]
+ set loglines [count_log_lines 0]
+ $replica config set repl-diskless-load swapdb
+ $replica config set key-load-delay 1000000
+ $replica config set loading-process-events-interval-bytes 1024
+ $replica replicaof $master_host $master_port
+
+ # wait for the replica to start reading the rdb
+ wait_for_log_messages 0 {"*Loading DB in memory*"} $loglines 1500 10
+
+ # wait to be sure the replica is hung and the master is blocked on write
+ after 500
+
+ # simulate the OOM killer or anyone else killing the child
+ set fork_child_pid [get_child_pid -1]
+ exec kill -9 $fork_child_pid
+
+ # wait for the parent to notice the child has exited
+ wait_for_condition 50 100 {
+ [s -1 rdb_bgsave_in_progress] == 0
+ } else {
+ fail "rdb child didn't terminate"
+ }
+
+ # Speed up shutdown
+ $replica config set key-load-delay 0
+ }
+ }
+} {} {external:skip}
+
+foreach mdl {yes no} {
+ test "replication child dies when parent is killed - diskless: $mdl" {
+ # when master is killed, make sure the fork child can detect that and exit
+ start_server {tags {"repl"} overrides {save ""}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set master_pid [srv 0 pid]
+ $master config set repl-diskless-sync $mdl
+ $master config set repl-diskless-sync-delay 0
+ # create keys that will take 10 seconds to save
+ $master config set rdb-key-save-delay 1000
+ $master debug populate 10000
+ start_server {overrides {save ""}} {
+ set replica [srv 0 client]
+ $replica replicaof $master_host $master_port
+
+ # wait for rdb child to start
+ wait_for_condition 5000 10 {
+ [s -1 rdb_bgsave_in_progress] == 1
+ } else {
+ fail "rdb child didn't start"
+ }
+ set fork_child_pid [get_child_pid -1]
+
+ # simulate the OOM killer or anyone else killing the parent
+ exec kill -9 $master_pid
+
+ # wait for the child to notice the parent has died and exit
+ wait_for_condition 500 10 {
+ [process_is_alive $fork_child_pid] == 0
+ } else {
+ fail "rdb child didn't terminate"
+ }
+ }
+ }
+ } {} {external:skip}
+}
+
+test "diskless replication read pipe cleanup" {
+ # In diskless replication, we create a read pipe for the RDB, between the child and the parent.
+ # When we close this pipe (fd), the read handler also needs to be removed from the event loop (if it is still registered).
+ # Otherwise, the next time we use the same fd, the registration will fail (panic), because
+ # we would use EPOLL_CTL_MOD (as if the fd were still registered in the event loop) on an fd that was already removed via epoll_ctl
+ start_server {tags {"repl"} overrides {save ""}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set master_pid [srv 0 pid]
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+
+ # put enough data in the db, and slow down the save, to keep the parent busy at the read process
+ $master config set rdb-key-save-delay 100000
+ $master debug populate 20000 test 10000
+ $master config set rdbcompression no
+ start_server {overrides {save ""}} {
+ set replica [srv 0 client]
+ set loglines [count_log_lines 0]
+ $replica config set repl-diskless-load swapdb
+ $replica replicaof $master_host $master_port
+
+ # wait for the replica to start reading the rdb
+ wait_for_log_messages 0 {"*Loading DB in memory*"} $loglines 1500 10
+
+ set loglines [count_log_lines -1]
+ # send FLUSHALL so the RDB child will be killed
+ $master flushall
+
+ # wait for another RDB child process to be started
+ wait_for_log_messages -1 {"*Background RDB transfer started by pid*"} $loglines 800 10
+
+ # make sure master is alive
+ $master ping
+ }
+ }
+} {} {external:skip}
+
+test {replicaof right after disconnection} {
+ # this is a rare race condition that was reproduced sporadically by the psync2 unit.
+ # see details in #7205
+ start_server {tags {"repl"} overrides {save ""}} {
+ set replica1 [srv 0 client]
+ set replica1_host [srv 0 host]
+ set replica1_port [srv 0 port]
+ set replica1_log [srv 0 stdout]
+ start_server {overrides {save ""}} {
+ set replica2 [srv 0 client]
+ set replica2_host [srv 0 host]
+ set replica2_port [srv 0 port]
+ set replica2_log [srv 0 stdout]
+ start_server {overrides {save ""}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ $replica1 replicaof $master_host $master_port
+ $replica2 replicaof $master_host $master_port
+
+ wait_for_condition 50 100 {
+ [string match {*master_link_status:up*} [$replica1 info replication]] &&
+ [string match {*master_link_status:up*} [$replica2 info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+
+ set rd [redis_deferring_client -1]
+ $rd debug sleep 1
+ after 100
+
+ # when replica2 wakes up from the sleep it will find both a disconnection
+ # from its master and a replicaof command in the same event loop
+ $master client kill type replica
+ $replica2 replicaof $replica1_host $replica1_port
+ $rd read
+
+ wait_for_condition 50 100 {
+ [string match {*master_link_status:up*} [$replica2 info replication]]
+ } else {
+ fail "role change failed."
+ }
+
+ # make sure psync succeeded, and there were no unexpected full syncs.
+ assert_equal [status $master sync_full] 2
+ assert_equal [status $replica1 sync_full] 0
+ assert_equal [status $replica2 sync_full] 0
+ }
+ }
+ }
+} {} {external:skip}
+
+test {Kill rdb child process if its dumping RDB is not useful} {
+ start_server {tags {"repl"}} {
+ set slave1 [srv 0 client]
+ start_server {} {
+ set slave2 [srv 0 client]
+ start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ for {set i 0} {$i < 10} {incr i} {
+ $master set $i $i
+ }
+ # Generating the RDB will cost 10s (10 keys * 1s)
+ $master config set rdb-key-save-delay 1000000
+ $master config set repl-diskless-sync no
+ $master config set save ""
+
+ $slave1 slaveof $master_host $master_port
+ $slave2 slaveof $master_host $master_port
+
+ # Wait for the child to start
+ wait_for_condition 50 100 {
+ ([s 0 rdb_bgsave_in_progress] == 1) &&
+ ([string match "*wait_bgsave*" [s 0 slave0]]) &&
+ ([string match "*wait_bgsave*" [s 0 slave1]])
+ } else {
+ fail "rdb child didn't start"
+ }
+
+ # Slave1 disconnects from the master
+ $slave1 slaveof no one
+ # Shouldn't kill the child since another slave is waiting for the rdb
+ after 100
+ assert {[s 0 rdb_bgsave_in_progress] == 1}
+
+ # Slave2 disconnects from the master
+ $slave2 slaveof no one
+ # Should kill the child
+ wait_for_condition 100 10 {
+ [s 0 rdb_bgsave_in_progress] eq 0
+ } else {
+ fail "can't kill rdb child"
+ }
+
+ # If save parameters are set, the child won't be killed
+ $master config set save "900 1"
+ $slave1 slaveof $master_host $master_port
+ $slave2 slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ ([s 0 rdb_bgsave_in_progress] == 1) &&
+ ([string match "*wait_bgsave*" [s 0 slave0]]) &&
+ ([string match "*wait_bgsave*" [s 0 slave1]])
+ } else {
+ fail "rdb child didn't start"
+ }
+ $slave1 slaveof no one
+ $slave2 slaveof no one
+ after 200
+ assert {[s 0 rdb_bgsave_in_progress] == 1}
+ catch {$master shutdown nosave}
+ }
+ }
+ }
+} {} {external:skip}
+
+start_server {tags {"repl external:skip"}} {
+ set master1_host [srv 0 host]
+ set master1_port [srv 0 port]
+ r set a b
+
+ start_server {} {
+ set master2 [srv 0 client]
+ set master2_host [srv 0 host]
+ set master2_port [srv 0 port]
+ # Dumping the RDB will take 10s (10 keys * 1s)
+ $master2 debug populate 10 master2 10
+ $master2 config set rdb-key-save-delay 1000000
+
+ start_server {} {
+ set sub_replica [srv 0 client]
+
+ start_server {} {
+ # Full sync with master1
+ r slaveof $master1_host $master1_port
+ wait_for_sync r
+ assert_equal "b" [r get a]
+
+ # Let the sub-replica sync with me
+ $sub_replica slaveof [srv 0 host] [srv 0 port]
+ wait_for_sync $sub_replica
+ assert_equal "b" [$sub_replica get a]
+
+ # Full sync with master2, then kill master2 before it finishes dumping the RDB
+ r slaveof $master2_host $master2_port
+ wait_for_condition 50 100 {
+ ([s -2 rdb_bgsave_in_progress] == 1) &&
+ ([string match "*wait_bgsave*" [s -2 slave0]])
+ } else {
+ fail "full sync didn't start"
+ }
+ catch {$master2 shutdown nosave}
+
+ test {Don't disconnect replicas before loading the transferred RDB on a full sync} {
+ assert ![log_file_matches [srv -1 stdout] "*Connection with master lost*"]
+ # The replication id is not changed in entire replication chain
+ assert_equal [s master_replid] [s -3 master_replid]
+ assert_equal [s master_replid] [s -1 master_replid]
+ }
+
+ test {Discard the cached master before loading the transferred RDB on a full sync} {
+ set full_sync [s -3 sync_full]
+ set partial_sync [s -3 sync_partial_ok]
+ # Partial sync with master1
+ r slaveof $master1_host $master1_port
+ wait_for_sync r
+ # master1 accepts partial sync instead of full sync
+ assert_equal $full_sync [s -3 sync_full]
+ assert_equal [expr $partial_sync+1] [s -3 sync_partial_ok]
+
+ # Since the master only partially syncs the replica, and the repl id is not changed,
+ # the replica doesn't disconnect from its sub-replicas
+ assert_equal [s master_replid] [s -3 master_replid]
+ assert_equal [s master_replid] [s -1 master_replid]
+ assert ![log_file_matches [srv -1 stdout] "*Connection with master lost*"]
+ # Sub replica just has one full sync, no partial resync.
+ assert_equal 1 [s sync_full]
+ assert_equal 0 [s sync_partial_ok]
+ }
+ }
+ }
+ }
+}
+
+test {replica can handle EINTR if using diskless load} {
+ start_server {tags {"repl"}} {
+ set replica [srv 0 client]
+ set replica_log [srv 0 stdout]
+ start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ $master debug populate 100 master 100000
+ $master config set rdbcompression no
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ $replica config set repl-diskless-load on-empty-db
+ # Construct an EINTR error by using the built-in watchdog
+ $replica config set watchdog-period 200
+ # Block replica in read()
+ $master config set rdb-key-save-delay 10000
+ # set speedy shutdown
+ $master config set save ""
+ # Start the replication process...
+ $replica replicaof $master_host $master_port
+
+ # Wait for the replica to start reading the rdb
+ set res [wait_for_log_messages -1 {"*Loading DB in memory*"} 0 200 10]
+ set loglines [lindex $res 1]
+
+ # Wait until we see the watchdog log line AFTER the loading started
+ wait_for_log_messages -1 {"*WATCHDOG TIMER EXPIRED*"} $loglines 200 10
+
+ # Make sure we're still loading, and that there was just one full sync attempt
+ assert ![log_file_matches [srv -1 stdout] "*Reconnecting to MASTER*"]
+ assert_equal 1 [s 0 sync_full]
+ assert_equal 1 [s -1 loading]
+ }
+ }
+} {} {external:skip}
+
+start_server {tags {"repl" "external:skip"}} {
+ test "replica do not write the reply to the replication link - SYNC (_addReplyToBufferOrList)" {
+ set rd [redis_deferring_client]
+ set lines [count_log_lines 0]
+
+ $rd sync
+ $rd ping
+ catch {$rd read} e
+ if {$::verbose} { puts "SYNC _addReplyToBufferOrList: $e" }
+ assert_equal "PONG" [r ping]
+
+ # Check we got the warning logs about the PING command.
+ verify_log_message 0 "*Replica generated a reply to command 'ping', disconnecting it: *" $lines
+
+ $rd close
+ waitForBgsave r
+ }
+
+ test "replica do not write the reply to the replication link - SYNC (addReplyDeferredLen)" {
+ set rd [redis_deferring_client]
+ set lines [count_log_lines 0]
+
+ $rd sync
+ $rd xinfo help
+ catch {$rd read} e
+ if {$::verbose} { puts "SYNC addReplyDeferredLen: $e" }
+ assert_equal "PONG" [r ping]
+
+ # Check we got the warning logs about the XINFO HELP command.
+ verify_log_message 0 "*Replica generated a reply to command 'xinfo|help', disconnecting it: *" $lines
+
+ $rd close
+ waitForBgsave r
+ }
+
+ test "replica do not write the reply to the replication link - PSYNC (_addReplyToBufferOrList)" {
+ set rd [redis_deferring_client]
+ set lines [count_log_lines 0]
+
+ $rd psync replicationid -1
+ assert_match {FULLRESYNC * 0} [$rd read]
+ $rd get foo
+ catch {$rd read} e
+ if {$::verbose} { puts "PSYNC _addReplyToBufferOrList: $e" }
+ assert_equal "PONG" [r ping]
+
+ # Check we got the warning logs about the GET command.
+ verify_log_message 0 "*Replica generated a reply to command 'get', disconnecting it: *" $lines
+ verify_log_message 0 "*== CRITICAL == This master is sending an error to its replica: *" $lines
+ verify_log_message 0 "*Replica can't interact with the keyspace*" $lines
+
+ $rd close
+ waitForBgsave r
+ }
+
+ test "replica do not write the reply to the replication link - PSYNC (addReplyDeferredLen)" {
+ set rd [redis_deferring_client]
+ set lines [count_log_lines 0]
+
+ $rd psync replicationid -1
+ assert_match {FULLRESYNC * 0} [$rd read]
+ $rd slowlog get
+ catch {$rd read} e
+ if {$::verbose} { puts "PSYNC addReplyDeferredLen: $e" }
+ assert_equal "PONG" [r ping]
+
+ # Check we got the warning logs about the SLOWLOG GET command.
+ verify_log_message 0 "*Replica generated a reply to command 'slowlog|get', disconnecting it: *" $lines
+
+ $rd close
+ waitForBgsave r
+ }
+
+ test "PSYNC with wrong offset should throw error" {
+ # It used to accept the FULL SYNC, but also replied with an error.
+ assert_error {ERR value is not an integer or out of range} {r psync replicationid offset_str}
+ set logs [exec tail -n 100 < [srv 0 stdout]]
+ assert_match {*Replica * asks for synchronization but with a wrong offset} $logs
+ assert_equal "PONG" [r ping]
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ $master debug SET-ACTIVE-EXPIRE 0
+ start_server {} {
+ set slave [srv 0 client]
+ $slave debug SET-ACTIVE-EXPIRE 0
+ $slave slaveof $master_host $master_port
+
+ test "Test replication with lazy expire" {
+ # wait for replication to be in sync
+ wait_for_condition 50 100 {
+ [lindex [$slave role] 0] eq {slave} &&
+ [string match {*master_link_status:up*} [$slave info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+
+ $master sadd s foo
+ $master pexpire s 1
+ after 10
+ $master sadd s foo
+ assert_equal 1 [$master wait 1 0]
+
+ assert_equal "set" [$master type s]
+ assert_equal "set" [$slave type s]
+ }
+ }
+}
diff --git a/tests/integration/shutdown.tcl b/tests/integration/shutdown.tcl
new file mode 100644
index 0000000..b2ec32c
--- /dev/null
+++ b/tests/integration/shutdown.tcl
@@ -0,0 +1,234 @@
+# This test suite tests shutdown when there are lagging replicas connected.
+
+# Fill up the OS socket send buffer for the replica connection 1M at a time.
+# When the replication buffer memory increases beyond 2M (often after writing 4M
+# or so), we assume it's because the OS socket send buffer can't swallow
+# any more.
+proc fill_up_os_socket_send_buffer_for_repl {idx} {
+ set i 0
+ while {1} {
+ incr i
+ populate 1024 junk$i: 1024 $idx
+ after 10
+ set buf_size [s $idx mem_total_replication_buffers]
+ if {$buf_size > 2*1024*1024} {
+ break
+ }
+ }
+}
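+
+# For example, in the tests below the master is at server index -1, so
+#   fill_up_os_socket_send_buffer_for_repl -1
+# loops until the master's mem_total_replication_buffers exceeds 2MB.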
+
+foreach how {sigterm shutdown} {
+ test "Shutting down master waits for replica to catch up ($how)" {
+ start_server {overrides {save ""}} {
+ start_server {overrides {save ""}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set master_pid [srv -1 pid]
+ set replica [srv 0 client]
+ set replica_pid [srv 0 pid]
+
+ # Config master.
+ $master config set shutdown-timeout 300; # 5min for slow CI
+ $master config set repl-backlog-size 1; # small as possible
+ $master config set hz 100; # cron runs every 10ms
+
+ # Config replica.
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+
+ # Preparation: Set k to 1 on both master and replica.
+ $master set k 1
+ wait_for_ofs_sync $master $replica
+
+ # Pause the replica.
+ pause_process $replica_pid
+
+ # Fill up the OS socket send buffer for the replica connection
+ # to prevent the following INCR from reaching the replica via
+ # the OS.
+ fill_up_os_socket_send_buffer_for_repl -1
+
+ # Incr k and immediately shutdown master.
+ $master incr k
+ switch $how {
+ sigterm {
+ exec kill -SIGTERM $master_pid
+ }
+ shutdown {
+ set rd [redis_deferring_client -1]
+ $rd shutdown
+ }
+ }
+ wait_for_condition 50 100 {
+ [s -1 shutdown_in_milliseconds] > 0
+ } else {
+ fail "Master not indicating ongoing shutdown."
+ }
+
+ # Wake up replica and check if master has waited for it.
+ after 20; # 2 cron intervals
+ resume_process $replica_pid
+ wait_for_condition 300 1000 {
+ [$replica get k] eq 2
+ } else {
+ fail "Master exited before replica could catch up."
+ }
+
+ # Check shutdown log messages on master
+ wait_for_log_messages -1 {"*ready to exit, bye bye*"} 0 100 500
+ assert_equal 0 [count_log_message -1 "*Lagging replica*"]
+ verify_log_message -1 "*1 of 1 replicas are in sync*" 0
+ }
+ }
+ } {} {repl external:skip}
+}
+
+test {Shutting down master waits for replica timeout} {
+ start_server {overrides {save ""}} {
+ start_server {overrides {save ""}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set master_pid [srv -1 pid]
+ set replica [srv 0 client]
+ set replica_pid [srv 0 pid]
+
+ # Config master.
+ $master config set shutdown-timeout 1; # second
+
+ # Config replica.
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+
+ # Preparation: Set k to 1 on both master and replica.
+ $master set k 1
+ wait_for_ofs_sync $master $replica
+
+ # Pause the replica.
+ pause_process $replica_pid
+
+ # Fill up the OS socket send buffer for the replica connection to
+ # prevent the following INCR k from reaching the replica via the OS.
+ fill_up_os_socket_send_buffer_for_repl -1
+
+ # Incr k and immediately shutdown master.
+ $master incr k
+ exec kill -SIGTERM $master_pid
+ wait_for_condition 50 100 {
+ [s -1 shutdown_in_milliseconds] > 0
+ } else {
+ fail "Master not indicating ongoing shutdown."
+ }
+
+ # Let master finish shutting down and check log.
+ wait_for_log_messages -1 {"*ready to exit, bye bye*"} 0 100 100
+ verify_log_message -1 "*Lagging replica*" 0
+ verify_log_message -1 "*0 of 1 replicas are in sync*" 0
+
+ # Wake up replica.
+ resume_process $replica_pid
+ assert_equal 1 [$replica get k]
+ }
+ }
+} {} {repl external:skip}
+
+test "Shutting down master waits for replica then fails" {
+ start_server {overrides {save ""}} {
+ start_server {overrides {save ""}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set master_pid [srv -1 pid]
+ set replica [srv 0 client]
+ set replica_pid [srv 0 pid]
+
+ # Config master and replica.
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+
+ # Pause the replica and write a key on master.
+ pause_process $replica_pid
+ $master incr k
+
+ # Two clients call blocking SHUTDOWN in parallel.
+ set rd1 [redis_deferring_client -1]
+ set rd2 [redis_deferring_client -1]
+ $rd1 shutdown
+ $rd2 shutdown
+ set info_clients [$master info clients]
+ assert_match "*connected_clients:3*" $info_clients
+ assert_match "*blocked_clients:2*" $info_clients
+
+ # Start a very slow initial AOFRW, which will prevent shutdown.
+ $master config set rdb-key-save-delay 30000000; # 30 seconds
+ $master config set appendonly yes
+
+ # Wake up replica, causing master to continue shutting down.
+ resume_process $replica_pid
+
+ # SHUTDOWN returns an error to both clients blocking on SHUTDOWN.
+ catch { $rd1 read } e1
+ catch { $rd2 read } e2
+ assert_match "*Errors trying to SHUTDOWN. Check logs*" $e1
+ assert_match "*Errors trying to SHUTDOWN. Check logs*" $e2
+ $rd1 close
+ $rd2 close
+
+ # Check shutdown log messages on master.
+ verify_log_message -1 "*1 of 1 replicas are in sync*" 0
+ verify_log_message -1 "*Writing initial AOF, can't exit*" 0
+ verify_log_message -1 "*Errors trying to shut down*" 0
+
+ # Let the master exit fast, without waiting for the very slow AOFRW.
+ catch {$master shutdown nosave force}
+ }
+ }
+} {} {repl external:skip}
+
+test "Shutting down master waits for replica then aborted" {
+ start_server {overrides {save ""}} {
+ start_server {overrides {save ""}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set master_pid [srv -1 pid]
+ set replica [srv 0 client]
+ set replica_pid [srv 0 pid]
+
+ # Config master and replica.
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+
+ # Pause the replica and write a key on master.
+ pause_process $replica_pid
+ $master incr k
+
+ # Two clients call blocking SHUTDOWN in parallel.
+ set rd1 [redis_deferring_client -1]
+ set rd2 [redis_deferring_client -1]
+ $rd1 shutdown
+ $rd2 shutdown
+ set info_clients [$master info clients]
+ assert_match "*connected_clients:3*" $info_clients
+ assert_match "*blocked_clients:2*" $info_clients
+
+ # Abort the shutdown
+ $master shutdown abort
+
+ # Wake up replica.
+ resume_process $replica_pid
+
+ # SHUTDOWN returns an error to both clients blocking on SHUTDOWN.
+ catch { $rd1 read } e1
+ catch { $rd2 read } e2
+ assert_match "*Errors trying to SHUTDOWN. Check logs*" $e1
+ assert_match "*Errors trying to SHUTDOWN. Check logs*" $e2
+ $rd1 close
+ $rd2 close
+
+ # Check shutdown log messages on master.
+ verify_log_message -1 "*Shutdown manually aborted*" 0
+ }
+ }
+} {} {repl external:skip}
diff --git a/tests/modules/Makefile b/tests/modules/Makefile
new file mode 100644
index 0000000..d63c854
--- /dev/null
+++ b/tests/modules/Makefile
@@ -0,0 +1,83 @@
+
+# find the OS
+uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
+
+warning_cflags = -W -Wall -Wno-missing-field-initializers
+ifeq ($(uname_S),Darwin)
+ SHOBJ_CFLAGS ?= $(warning_cflags) -dynamic -fno-common -g -ggdb -std=c99 -O2
+ SHOBJ_LDFLAGS ?= -bundle -undefined dynamic_lookup
+else # Linux, others
+ SHOBJ_CFLAGS ?= $(warning_cflags) -fno-common -g -ggdb -std=c99 -O2
+ SHOBJ_LDFLAGS ?= -shared
+endif
+
+ifeq ($(uname_S),Linux)
+ LD = gcc
+ CC = gcc
+endif
+
+# OS X 11.x doesn't have /usr/lib/libSystem.dylib and needs an explicit setting.
+ifeq ($(uname_S),Darwin)
+ifeq ("$(wildcard /usr/lib/libSystem.dylib)","")
+LIBS = -L /Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib -lsystem
+endif
+endif
+
+TEST_MODULES = \
+ commandfilter.so \
+ basics.so \
+ testrdb.so \
+ fork.so \
+ infotest.so \
+ propagate.so \
+ misc.so \
+ hooks.so \
+ blockonkeys.so \
+ blockonbackground.so \
+ scan.so \
+ datatype.so \
+ datatype2.so \
+ auth.so \
+ keyspace_events.so \
+ blockedclient.so \
+ getkeys.so \
+ getchannels.so \
+ test_lazyfree.so \
+ timer.so \
+ defragtest.so \
+ keyspecs.so \
+ hash.so \
+ zset.so \
+ stream.so \
+ mallocsize.so \
+ aclcheck.so \
+ list.so \
+ subcommands.so \
+ reply.so \
+ cmdintrospection.so \
+ eventloop.so \
+ moduleconfigs.so \
+ moduleconfigstwo.so \
+ publish.so \
+ usercall.so \
+ postnotifications.so \
+ moduleauthtwo.so \
+ rdbloadsave.so
+
+.PHONY: all
+
+all: $(TEST_MODULES)
+
+32bit:
+ $(MAKE) CFLAGS="-m32" LDFLAGS="-m32"
+
+%.xo: %.c ../../src/redismodule.h
+ $(CC) -I../../src $(CFLAGS) $(SHOBJ_CFLAGS) -fPIC -c $< -o $@
+
+%.so: %.xo
+ $(LD) -o $@ $^ $(SHOBJ_LDFLAGS) $(LDFLAGS) $(LIBS)
+
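+# For orientation, building one module under the two pattern rules above
+# looks roughly like this on Linux (a sketch; the actual flags depend on
+# the environment and on any CFLAGS/LDFLAGS passed in):
+#
+#   gcc -I../../src -W -Wall -fno-common -g -ggdb -std=c99 -O2 -fPIC \
+#       -c aclcheck.c -o aclcheck.xo
+#   gcc -o aclcheck.so aclcheck.xo -shared
+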
+.PHONY: clean
+
+clean:
+ rm -f $(TEST_MODULES) $(TEST_MODULES:.so=.xo)
diff --git a/tests/modules/aclcheck.c b/tests/modules/aclcheck.c
new file mode 100644
index 0000000..09b525c
--- /dev/null
+++ b/tests/modules/aclcheck.c
@@ -0,0 +1,269 @@
+
+#include "redismodule.h"
+#include <errno.h>
+#include <assert.h>
+#include <string.h>
+#include <strings.h>
+
+/* A wrapper for the SET command that performs an ACL check on the key. */
+int set_aclcheck_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 4) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ int permissions;
+ const char *flags = RedisModule_StringPtrLen(argv[1], NULL);
+
+ if (!strcasecmp(flags, "W")) {
+ permissions = REDISMODULE_CMD_KEY_UPDATE;
+ } else if (!strcasecmp(flags, "R")) {
+ permissions = REDISMODULE_CMD_KEY_ACCESS;
+ } else if (!strcasecmp(flags, "*")) {
+ permissions = REDISMODULE_CMD_KEY_UPDATE | REDISMODULE_CMD_KEY_ACCESS;
+ } else if (!strcasecmp(flags, "~")) {
+ permissions = 0; /* Requires either read or write */
+ } else {
+ RedisModule_ReplyWithError(ctx, "INVALID FLAGS");
+ return REDISMODULE_OK;
+ }
+
+ /* Check that the key can be accessed */
+ RedisModuleString *user_name = RedisModule_GetCurrentUserName(ctx);
+ RedisModuleUser *user = RedisModule_GetModuleUserFromUserName(user_name);
+ int ret = RedisModule_ACLCheckKeyPermissions(user, argv[2], permissions);
+ if (ret != 0) {
+ RedisModule_ReplyWithError(ctx, "DENIED KEY");
+ RedisModule_FreeModuleUser(user);
+ RedisModule_FreeString(ctx, user_name);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleCallReply *rep = RedisModule_Call(ctx, "SET", "v", argv + 2, argc - 2);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ RedisModule_FreeModuleUser(user);
+ RedisModule_FreeString(ctx, user_name);
+ return REDISMODULE_OK;
+}
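+
+/* An illustrative session for the command registered below as
+ * "aclcheck.set.check.key" (a sketch; key/value names are made up):
+ *
+ *   ACLCHECK.SET.CHECK.KEY W mykey myvalue   requires update permission
+ *   ACLCHECK.SET.CHECK.KEY ~ mykey myvalue   requires read or write
+ *
+ * A user whose ACL rules exclude the key gets the "DENIED KEY" error. */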
+
+/* A wrapper for the PUBLISH command that performs an ACL check on the channel. */
+int publish_aclcheck_channel(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ /* Check that the pubsub channel can be accessed */
+ RedisModuleString *user_name = RedisModule_GetCurrentUserName(ctx);
+ RedisModuleUser *user = RedisModule_GetModuleUserFromUserName(user_name);
+ int ret = RedisModule_ACLCheckChannelPermissions(user, argv[1], REDISMODULE_CMD_CHANNEL_SUBSCRIBE);
+ if (ret != 0) {
+ RedisModule_ReplyWithError(ctx, "DENIED CHANNEL");
+ RedisModule_FreeModuleUser(user);
+ RedisModule_FreeString(ctx, user_name);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleCallReply *rep = RedisModule_Call(ctx, "PUBLISH", "v", argv + 1, argc - 1);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ RedisModule_FreeModuleUser(user);
+ RedisModule_FreeString(ctx, user_name);
+ return REDISMODULE_OK;
+}
+
+/* A wrapper for RM_Call that first checks that the command can be executed. */
+int rm_call_aclcheck_cmd(RedisModuleCtx *ctx, RedisModuleUser *user, RedisModuleString **argv, int argc) {
+ if (argc < 2) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ /* Check that the command can be executed */
+ int ret = RedisModule_ACLCheckCommandPermissions(user, argv + 1, argc - 1);
+ if (ret != 0) {
+ RedisModule_ReplyWithError(ctx, "DENIED CMD");
+ /* Add entry to ACL log */
+ RedisModule_ACLAddLogEntry(ctx, user, argv[1], REDISMODULE_ACL_LOG_CMD);
+ return REDISMODULE_OK;
+ }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "v", argv + 2, argc - 2);
+    if (!rep) {
+        RedisModule_ReplyWithError(ctx, "NULL reply returned");
+    } else {
+        RedisModule_ReplyWithCallReply(ctx, rep);
+        RedisModule_FreeCallReply(rep);
+    }
+
+ return REDISMODULE_OK;
+}
+
+int rm_call_aclcheck_cmd_default_user(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModuleString *user_name = RedisModule_GetCurrentUserName(ctx);
+ RedisModuleUser *user = RedisModule_GetModuleUserFromUserName(user_name);
+
+ int res = rm_call_aclcheck_cmd(ctx, user, argv, argc);
+
+ RedisModule_FreeModuleUser(user);
+ RedisModule_FreeString(ctx, user_name);
+ return res;
+}
+
+int rm_call_aclcheck_cmd_module_user(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ /* Create a user and authenticate */
+ RedisModuleUser *user = RedisModule_CreateModuleUser("testuser1");
+ RedisModule_SetModuleUserACL(user, "allcommands");
+ RedisModule_SetModuleUserACL(user, "allkeys");
+ RedisModule_SetModuleUserACL(user, "on");
+ RedisModule_AuthenticateClientWithUser(ctx, user, NULL, NULL, NULL);
+
+ int res = rm_call_aclcheck_cmd(ctx, user, argv, argc);
+
+    /* Authenticate back to the "default" user (so that once we free testuser1 the client will not be disconnected). */
+ RedisModule_AuthenticateClientWithACLUser(ctx, "default", 7, NULL, NULL, NULL);
+ RedisModule_FreeModuleUser(user);
+ return res;
+}
+
+int rm_call_aclcheck_with_errors(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    if (argc < 2) {
+        return RedisModule_WrongArity(ctx);
+    }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "vEC", argv + 2, argc - 2);
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ return REDISMODULE_OK;
+}
+
+/* A wrapper for RM_Call that passes the 'C' flag to perform an ACL check on the command. */
+int rm_call_aclcheck(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+    if (argc < 2) {
+        return RedisModule_WrongArity(ctx);
+    }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "vC", argv + 2, argc - 2);
+    if (!rep) {
+ char err[100];
+ switch (errno) {
+ case EACCES:
+ RedisModule_ReplyWithError(ctx, "ERR NOPERM");
+ break;
+ default:
+ snprintf(err, sizeof(err) - 1, "ERR errno=%d", errno);
+ RedisModule_ReplyWithError(ctx, err);
+ break;
+ }
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ return REDISMODULE_OK;
+}
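+
+/* Sketch of the intended behavior (hypothetical ACL setup): for a user
+ * restricted with something like "ACL SETUSER u on >pw ~* +@all -get",
+ * calling
+ *
+ *   ACLCHECK.RM_CALL GET k
+ *
+ * as that user makes RM_Call return NULL with errno == EACCES, so the
+ * switch above replies "ERR NOPERM" instead of forwarding a reply. */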
+
+int module_test_acl_category(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int commandBlockCheck(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ int response_ok = 0;
+ int result = RedisModule_CreateCommand(ctx,"command.that.should.fail", module_test_acl_category, "", 0, 0, 0);
+ response_ok |= (result == REDISMODULE_OK);
+
+ RedisModuleCommand *parent = RedisModule_GetCommand(ctx,"block.commands.outside.onload");
+ result = RedisModule_SetCommandACLCategories(parent, "write");
+ response_ok |= (result == REDISMODULE_OK);
+
+ result = RedisModule_CreateSubcommand(parent,"subcommand.that.should.fail",module_test_acl_category,"",0,0,0);
+ response_ok |= (result == REDISMODULE_OK);
+
+ /* This validates that it's not possible to create commands outside OnLoad,
+ * thus returns an error if they succeed. */
+ if (response_ok) {
+ RedisModule_ReplyWithError(ctx, "UNEXPECTEDOK");
+ } else {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ }
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"aclcheck",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.set.check.key", set_aclcheck_key,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"block.commands.outside.onload", commandBlockCheck,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.module.command.aclcategories.write", module_test_acl_category,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ RedisModuleCommand *aclcategories_write = RedisModule_GetCommand(ctx,"aclcheck.module.command.aclcategories.write");
+
+ if (RedisModule_SetCommandACLCategories(aclcategories_write, "write") == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.module.command.aclcategories.write.function.read.category", module_test_acl_category,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ RedisModuleCommand *read_category = RedisModule_GetCommand(ctx,"aclcheck.module.command.aclcategories.write.function.read.category");
+
+ if (RedisModule_SetCommandACLCategories(read_category, "read") == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.module.command.aclcategories.read.only.category", module_test_acl_category,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ RedisModuleCommand *read_only_category = RedisModule_GetCommand(ctx,"aclcheck.module.command.aclcategories.read.only.category");
+
+ if (RedisModule_SetCommandACLCategories(read_only_category, "read") == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.publish.check.channel", publish_aclcheck_channel,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.rm_call.check.cmd", rm_call_aclcheck_cmd_default_user,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.rm_call.check.cmd.module.user", rm_call_aclcheck_cmd_module_user,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.rm_call", rm_call_aclcheck,
+ "write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"aclcheck.rm_call_with_errors", rm_call_aclcheck_with_errors,
+ "write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/auth.c b/tests/modules/auth.c
new file mode 100644
index 0000000..19be95a
--- /dev/null
+++ b/tests/modules/auth.c
@@ -0,0 +1,270 @@
+/* Define feature-test macros so that usleep() is declared. */
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE
+
+#include "redismodule.h"
+
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+
+#define UNUSED(V) ((void) V)
+
+// A simple global user
+static RedisModuleUser *global = NULL;
+static long long client_change_delta = 0;
+
+void UserChangedCallback(uint64_t client_id, void *privdata) {
+ REDISMODULE_NOT_USED(privdata);
+ REDISMODULE_NOT_USED(client_id);
+ client_change_delta++;
+}
+
+int Auth_CreateModuleUser(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (global) {
+ RedisModule_FreeModuleUser(global);
+ }
+
+ global = RedisModule_CreateModuleUser("global");
+ RedisModule_SetModuleUserACL(global, "allcommands");
+ RedisModule_SetModuleUserACL(global, "allkeys");
+ RedisModule_SetModuleUserACL(global, "on");
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
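+
+/* Typical flow from a test client (a sketch of the commands registered
+ * at the bottom of this file):
+ *
+ *   AUTH.CREATEMODULEUSER    build the permissive "global" module user
+ *   AUTH.AUTHMODULEUSER      switch this connection to that user
+ *   AUTH.CHANGECOUNT         report how many user-change callbacks fired
+ */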
+
+int Auth_AuthModuleUser(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ uint64_t client_id;
+ RedisModule_AuthenticateClientWithUser(ctx, global, UserChangedCallback, NULL, &client_id);
+
+ return RedisModule_ReplyWithLongLong(ctx, (uint64_t) client_id);
+}
+
+int Auth_AuthRealUser(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ size_t length;
+ uint64_t client_id;
+
+ RedisModuleString *user_string = argv[1];
+ const char *name = RedisModule_StringPtrLen(user_string, &length);
+
+ if (RedisModule_AuthenticateClientWithACLUser(ctx, name, length,
+ UserChangedCallback, NULL, &client_id) == REDISMODULE_ERR) {
+ return RedisModule_ReplyWithError(ctx, "Invalid user");
+ }
+
+ return RedisModule_ReplyWithLongLong(ctx, (uint64_t) client_id);
+}
+
+/* This command redacts every other argument and returns OK. */
+int Auth_RedactedAPI(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ for(int i = argc - 1; i > 0; i -= 2) {
+ int result = RedisModule_RedactClientCommandArgument(ctx, i);
+ RedisModule_Assert(result == REDISMODULE_OK);
+ }
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
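+
+/* Redaction sketch: the loop above walks argv backwards from the last
+ * argument in steps of two, so for a hypothetical call
+ *
+ *   AUTH.REDACT field1 value1 field2 value2
+ *
+ * positions 4 and 2 (both values) are redacted, while the field names
+ * remain visible in introspection such as the slow log. */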
+
+int Auth_ChangeCount(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ long long result = client_change_delta;
+ client_change_delta = 0;
+ return RedisModule_ReplyWithLongLong(ctx, result);
+}
+
+/* The Module functionality below validates that module authentication callbacks can be registered
+ * to support both non-blocking and blocking module based authentication. */
+
+/* Non Blocking Module Auth callback / implementation. */
+int auth_cb(RedisModuleCtx *ctx, RedisModuleString *username, RedisModuleString *password, RedisModuleString **err) {
+ const char *user = RedisModule_StringPtrLen(username, NULL);
+ const char *pwd = RedisModule_StringPtrLen(password, NULL);
+ if (!strcmp(user,"foo") && !strcmp(pwd,"allow")) {
+ RedisModule_AuthenticateClientWithACLUser(ctx, "foo", 3, NULL, NULL, NULL);
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ else if (!strcmp(user,"foo") && !strcmp(pwd,"deny")) {
+ RedisModuleString *log = RedisModule_CreateString(ctx, "Module Auth", 11);
+ RedisModule_ACLAddLogEntryByUserName(ctx, username, log, REDISMODULE_ACL_LOG_AUTH);
+ RedisModule_FreeString(ctx, log);
+ const char *err_msg = "Auth denied by Misc Module.";
+ *err = RedisModule_CreateString(ctx, err_msg, strlen(err_msg));
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ return REDISMODULE_AUTH_NOT_HANDLED;
+}
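+
+/* Once registered, this callback is exercised roughly as follows (user
+ * "foo" is assumed to exist on the server with suitable ACL rules):
+ *
+ *   AUTH foo allow   -> handled; client authenticated as ACL user "foo"
+ *   AUTH foo deny    -> handled; denied and recorded in the ACL log
+ *   AUTH foo other   -> not handled; falls through to normal AUTH checks
+ */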
+
+int test_rm_register_auth_cb(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModule_RegisterAuthCallback(ctx, auth_cb);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/*
+ * The thread entry point that actually executes the blocking part of the AUTH command.
+ * This function sleeps for 0.5 seconds and then unblocks the client which will later call
+ * `AuthBlock_Reply`.
+ * `arg` is expected to contain the RedisModuleBlockedClient, username, and password.
+ */
+void *AuthBlock_ThreadMain(void *arg) {
+ usleep(500000);
+ void **targ = arg;
+ RedisModuleBlockedClient *bc = targ[0];
+ int result = 2;
+ const char *user = RedisModule_StringPtrLen(targ[1], NULL);
+ const char *pwd = RedisModule_StringPtrLen(targ[2], NULL);
+ if (!strcmp(user,"foo") && !strcmp(pwd,"block_allow")) {
+ result = 1;
+ }
+ else if (!strcmp(user,"foo") && !strcmp(pwd,"block_deny")) {
+ result = 0;
+ }
+ else if (!strcmp(user,"foo") && !strcmp(pwd,"block_abort")) {
+ RedisModule_BlockedClientMeasureTimeEnd(bc);
+ RedisModule_AbortBlock(bc);
+ goto cleanup;
+ }
+ /* Provide the result to the blocking reply cb. */
+ void **replyarg = RedisModule_Alloc(sizeof(void*));
+ replyarg[0] = (void *) (uintptr_t) result;
+ RedisModule_BlockedClientMeasureTimeEnd(bc);
+ RedisModule_UnblockClient(bc, replyarg);
+cleanup:
+ /* Free the username and password and thread / arg data. */
+ RedisModule_FreeString(NULL, targ[1]);
+ RedisModule_FreeString(NULL, targ[2]);
+ RedisModule_Free(targ);
+ return NULL;
+}
+
+/*
+ * Reply callback for a blocking AUTH command. This is called when the client is unblocked.
+ */
+int AuthBlock_Reply(RedisModuleCtx *ctx, RedisModuleString *username, RedisModuleString *password, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(password);
+ void **targ = RedisModule_GetBlockedClientPrivateData(ctx);
+ int result = (uintptr_t) targ[0];
+ size_t userlen = 0;
+ const char *user = RedisModule_StringPtrLen(username, &userlen);
+ /* Handle the success case by authenticating. */
+ if (result == 1) {
+ RedisModule_AuthenticateClientWithACLUser(ctx, user, userlen, NULL, NULL, NULL);
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ /* Handle the Error case by denying auth */
+ else if (result == 0) {
+ RedisModuleString *log = RedisModule_CreateString(ctx, "Module Auth", 11);
+ RedisModule_ACLAddLogEntryByUserName(ctx, username, log, REDISMODULE_ACL_LOG_AUTH);
+ RedisModule_FreeString(ctx, log);
+ const char *err_msg = "Auth denied by Misc Module.";
+ *err = RedisModule_CreateString(ctx, err_msg, strlen(err_msg));
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ /* "Skip" Authentication */
+ return REDISMODULE_AUTH_NOT_HANDLED;
+}
+
+/* Private data freeing callback for Module Auth. */
+void AuthBlock_FreeData(RedisModuleCtx *ctx, void *privdata) {
+ REDISMODULE_NOT_USED(ctx);
+ RedisModule_Free(privdata);
+}
+
+/* Callback triggered when the engine attempts module auth.
+ * The return code here is one of the following: auth succeeded, auth denied,
+ * auth not handled, or auth blocked.
+ * The module could make auth succeed / fail right here, but this is an
+ * example of blocking module auth.
+ */
+int blocking_auth_cb(RedisModuleCtx *ctx, RedisModuleString *username, RedisModuleString *password, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(username);
+ REDISMODULE_NOT_USED(password);
+ REDISMODULE_NOT_USED(err);
+ /* Block the client from the Module. */
+ RedisModuleBlockedClient *bc = RedisModule_BlockClientOnAuth(ctx, AuthBlock_Reply, AuthBlock_FreeData);
+ int ctx_flags = RedisModule_GetContextFlags(ctx);
+ if (ctx_flags & REDISMODULE_CTX_FLAGS_MULTI || ctx_flags & REDISMODULE_CTX_FLAGS_LUA) {
+ /* Clean up by using RedisModule_UnblockClient since we attempted blocking the client. */
+ RedisModule_UnblockClient(bc, NULL);
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ RedisModule_BlockedClientMeasureTimeStart(bc);
+ pthread_t tid;
+ /* Allocate memory for information needed. */
+ void **targ = RedisModule_Alloc(sizeof(void*)*3);
+ targ[0] = bc;
+ targ[1] = RedisModule_CreateStringFromString(NULL, username);
+ targ[2] = RedisModule_CreateStringFromString(NULL, password);
+ /* Create bg thread and pass the blockedclient, username and password to it. */
+ if (pthread_create(&tid, NULL, AuthBlock_ThreadMain, targ) != 0) {
+ RedisModule_AbortBlock(bc);
+ }
+ return REDISMODULE_AUTH_HANDLED;
+}
+
+int test_rm_register_blocking_auth_cb(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModule_RegisterAuthCallback(ctx, blocking_auth_cb);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"testacl",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"auth.authrealuser",
+ Auth_AuthRealUser,"no-auth",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"auth.createmoduleuser",
+ Auth_CreateModuleUser,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"auth.authmoduleuser",
+ Auth_AuthModuleUser,"no-auth",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"auth.changecount",
+ Auth_ChangeCount,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"auth.redact",
+ Auth_RedactedAPI,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testmoduleone.rm_register_auth_cb",
+ test_rm_register_auth_cb,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testmoduleone.rm_register_blocking_auth_cb",
+ test_rm_register_blocking_auth_cb,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ UNUSED(ctx);
+
+ if (global)
+ RedisModule_FreeModuleUser(global);
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/basics.c b/tests/modules/basics.c
new file mode 100644
index 0000000..897cb5d
--- /dev/null
+++ b/tests/modules/basics.c
@@ -0,0 +1,1052 @@
+/* Module designed to test the Redis modules subsystem.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2016, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "redismodule.h"
+#include <string.h>
+#include <stdlib.h>
+
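+/* A quick reference for the RedisModule_Call() format characters used in
+ * this file (as exercised below; redismodule.h remains the authoritative
+ * list):
+ *   c  NUL-terminated C string        s  RedisModuleString *
+ *   l  long long                      v  vector of RedisModuleString *
+ *   3  force a RESP3 reply            0  reply in the client's protocol
+ *   E  return errors as CallReply     C  check the current user's ACL
+ *   S  run in script mode             W  disallow write commands
+ */
+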
+/* --------------------------------- Helpers -------------------------------- */
+
+/* Return true if the reply matches the given C null-terminated string. */
+int TestMatchReply(RedisModuleCallReply *reply, char *str) {
+ RedisModuleString *mystr;
+ mystr = RedisModule_CreateStringFromCallReply(reply);
+ if (!mystr) return 0;
+ const char *ptr = RedisModule_StringPtrLen(mystr,NULL);
+ return strcmp(ptr,str) == 0;
+}
+
+/* ------------------------------- Test units ------------------------------- */
+
+/* TEST.CALL -- Test Call() API. */
+int TestCall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ RedisModule_Call(ctx,"DEL","c","mylist");
+ RedisModuleString *mystr = RedisModule_CreateString(ctx,"foo",3);
+ RedisModule_Call(ctx,"RPUSH","csl","mylist",mystr,(long long)1234);
+ reply = RedisModule_Call(ctx,"LRANGE","ccc","mylist","0","-1");
+ long long items = RedisModule_CallReplyLength(reply);
+ if (items != 2) goto fail;
+
+ RedisModuleCallReply *item0, *item1;
+
+ item0 = RedisModule_CallReplyArrayElement(reply,0);
+ item1 = RedisModule_CallReplyArrayElement(reply,1);
+ if (!TestMatchReply(item0,"foo")) goto fail;
+ if (!TestMatchReply(item1,"1234")) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Attribute(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "attrib"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_STRING) goto fail;
+
+    /* make sure we cannot reply to a resp2 client with resp3 (it might be a string but it contains an attribute) */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ if (!TestMatchReply(reply,"Some real reply following the attribute")) goto fail;
+
+ reply = RedisModule_CallReplyAttribute(reply);
+ if (!reply || RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ATTRIBUTE) goto fail;
+ /* make sure we can not reply to resp2 client with resp3 attribute */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+ if (RedisModule_CallReplyLength(reply) != 1) goto fail;
+
+ RedisModuleCallReply *key, *val;
+ if (RedisModule_CallReplyAttributeElement(reply,0,&key,&val) != REDISMODULE_OK) goto fail;
+ if (!TestMatchReply(key,"key-popularity")) goto fail;
+ if (RedisModule_CallReplyType(val) != REDISMODULE_REPLY_ARRAY) goto fail;
+ if (RedisModule_CallReplyLength(val) != 2) goto fail;
+ if (!TestMatchReply(RedisModule_CallReplyArrayElement(val, 0),"key:123")) goto fail;
+ if (!TestMatchReply(RedisModule_CallReplyArrayElement(val, 1),"90")) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
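+
+/* For reference, the RESP3 reply walked above carries the attribute map
+ * {"key-popularity" => ["key:123", "90"]} attached to the string
+ * "Some real reply following the attribute", which is exactly what the
+ * assertions check element by element. */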
+
+int TestGetResp(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ int flags = RedisModule_GetContextFlags(ctx);
+
+ if (flags & REDISMODULE_CTX_FLAGS_RESP3) {
+ RedisModule_ReplyWithLongLong(ctx, 3);
+ } else {
+ RedisModule_ReplyWithLongLong(ctx, 2);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int TestCallRespAutoMode(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ RedisModule_Call(ctx,"DEL","c","myhash");
+ RedisModule_Call(ctx,"HSET","ccccc","myhash", "f1", "v1", "f2", "v2");
+    /* 0 stands for auto mode; we will get the reply in the same protocol as the client */
+ reply = RedisModule_Call(ctx,"HGETALL","0c" ,"myhash");
+ RedisModule_ReplyWithCallReply(ctx, reply);
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Map(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ RedisModule_Call(ctx,"DEL","c","myhash");
+ RedisModule_Call(ctx,"HSET","ccccc","myhash", "f1", "v1", "f2", "v2");
+ reply = RedisModule_Call(ctx,"HGETALL","3c" ,"myhash"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_MAP) goto fail;
+
+ /* make sure we can not reply to resp2 client with resp3 map */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ long long items = RedisModule_CallReplyLength(reply);
+ if (items != 2) goto fail;
+
+ RedisModuleCallReply *key0, *key1;
+ RedisModuleCallReply *val0, *val1;
+ if (RedisModule_CallReplyMapElement(reply,0,&key0,&val0) != REDISMODULE_OK) goto fail;
+ if (RedisModule_CallReplyMapElement(reply,1,&key1,&val1) != REDISMODULE_OK) goto fail;
+ if (!TestMatchReply(key0,"f1")) goto fail;
+ if (!TestMatchReply(key1,"f2")) goto fail;
+ if (!TestMatchReply(val0,"v1")) goto fail;
+ if (!TestMatchReply(val1,"v2")) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Bool(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "true"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_BOOL) goto fail;
+ /* make sure we can not reply to resp2 client with resp3 bool */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ if (!RedisModule_CallReplyBool(reply)) goto fail;
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "false"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_BOOL) goto fail;
+ if (RedisModule_CallReplyBool(reply)) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Null(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "null"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_NULL) goto fail;
+
+ /* make sure we can not reply to resp2 client with resp3 null */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallReplyWithNestedReply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ RedisModule_Call(ctx,"DEL","c","mylist");
+ RedisModule_Call(ctx,"RPUSH","ccl","mylist","test",(long long)1234);
+ reply = RedisModule_Call(ctx,"LRANGE","ccc","mylist","0","-1");
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ARRAY) goto fail;
+ if (RedisModule_CallReplyLength(reply) < 1) goto fail;
+ RedisModuleCallReply *nestedReply = RedisModule_CallReplyArrayElement(reply, 0);
+
+ RedisModule_ReplyWithCallReply(ctx,nestedReply);
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallReplyWithArrayReply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ RedisModule_Call(ctx,"DEL","c","mylist");
+ RedisModule_Call(ctx,"RPUSH","ccl","mylist","test",(long long)1234);
+ reply = RedisModule_Call(ctx,"LRANGE","ccc","mylist","0","-1");
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ARRAY) goto fail;
+
+ RedisModule_ReplyWithCallReply(ctx,reply);
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Double(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "double"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_DOUBLE) goto fail;
+
+ /* make sure we can not reply to resp2 client with resp3 double*/
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ double d = RedisModule_CallReplyDouble(reply);
+    /* we compare strings, since comparing doubles directly can fail on various architectures, e.g. 32-bit */
+ char got[30], expected[30];
+ snprintf(got, sizeof(got), "%.17g", d);
+ snprintf(expected, sizeof(expected), "%.17g", 3.141);
+ if (strcmp(got, expected) != 0) goto fail;
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3BigNumber(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "bignum"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_BIG_NUMBER) goto fail;
+
+ /* make sure we can not reply to resp2 client with resp3 big number */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ size_t len;
+ const char* big_num = RedisModule_CallReplyBigNumber(reply, &len);
+ RedisModule_ReplyWithStringBuffer(ctx,big_num,len);
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Verbatim(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx,"DEBUG","3cc" ,"PROTOCOL", "verbatim"); /* 3 stands for resp 3 reply */
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_VERBATIM_STRING) goto fail;
+
+ /* make sure we can not reply to resp2 client with resp3 verbatim string */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ const char* format;
+ size_t len;
+ const char* str = RedisModule_CallReplyVerbatim(reply, &len, &format);
+ RedisModuleString *s = RedisModule_CreateStringPrintf(ctx, "%.*s:%.*s", 3, format, (int)len, str);
+ RedisModule_ReplyWithString(ctx,s);
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+int TestCallResp3Set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+ RedisModule_Call(ctx,"DEL","c","myset");
+ RedisModule_Call(ctx,"sadd","ccc","myset", "v1", "v2");
+ reply = RedisModule_Call(ctx,"smembers","3c" ,"myset"); // N stands for resp 3 reply
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_SET) goto fail;
+
+ /* make sure we can not reply to resp2 client with resp3 set */
+ if (RedisModule_ReplyWithCallReply(ctx, reply) != REDISMODULE_ERR) goto fail;
+
+ long long items = RedisModule_CallReplyLength(reply);
+ if (items != 2) goto fail;
+
+ RedisModuleCallReply *val0, *val1;
+
+ val0 = RedisModule_CallReplySetElement(reply,0);
+ val1 = RedisModule_CallReplySetElement(reply,1);
+
+    /*
+     * The order of set elements is not guaranteed, so we just
+     * verify that each reply matches one of the elements.
+     */
+ if (!TestMatchReply(val0,"v1") && !TestMatchReply(val0,"v2")) goto fail;
+ if (!TestMatchReply(val1,"v1") && !TestMatchReply(val1,"v2")) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,"ERR");
+ return REDISMODULE_OK;
+}
+
+/* TEST.STRING.APPEND -- Test appending to an existing string object. */
+int TestStringAppend(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleString *s = RedisModule_CreateString(ctx,"foo",3);
+ RedisModule_StringAppendBuffer(ctx,s,"bar",3);
+ RedisModule_ReplyWithString(ctx,s);
+ RedisModule_FreeString(ctx,s);
+ return REDISMODULE_OK;
+}
+
+/* TEST.STRING.APPEND.AM -- Test append with retain when auto memory is on. */
+int TestStringAppendAM(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleString *s = RedisModule_CreateString(ctx,"foo",3);
+ RedisModule_RetainString(ctx,s);
+ RedisModule_TrimStringAllocation(s); /* Mostly NOP, but exercises the API function */
+ RedisModule_StringAppendBuffer(ctx,s,"bar",3);
+ RedisModule_ReplyWithString(ctx,s);
+ RedisModule_FreeString(ctx,s);
+ return REDISMODULE_OK;
+}
+
+/* TEST.STRING.TRIM -- Test trimming a string that has free space. */
+int TestTrimString(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleString *s = RedisModule_CreateString(ctx,"foo",3);
+ char *tmp = RedisModule_Alloc(1024);
+ RedisModule_StringAppendBuffer(ctx,s,tmp,1024);
+ size_t string_len = RedisModule_MallocSizeString(s);
+ RedisModule_TrimStringAllocation(s);
+ size_t len_after_trim = RedisModule_MallocSizeString(s);
+
+ /* Determine if using jemalloc memory allocator. */
+ RedisModuleServerInfoData *info = RedisModule_GetServerInfo(ctx, "memory");
+ const char *field = RedisModule_ServerInfoGetFieldC(info, "mem_allocator");
+ int use_jemalloc = !strncmp(field, "jemalloc", 8);
+
+ /* Jemalloc will reallocate `s` from 2k to 1k after RedisModule_TrimStringAllocation(),
+ * but non-jemalloc memory allocators may keep the old size. */
+ if ((use_jemalloc && len_after_trim < string_len) ||
+ (!use_jemalloc && len_after_trim <= string_len))
+ {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ } else {
+ RedisModule_ReplyWithError(ctx, "String was not trimmed as expected.");
+ }
+ RedisModule_FreeServerInfo(ctx, info);
+ RedisModule_Free(tmp);
+ RedisModule_FreeString(ctx,s);
+ return REDISMODULE_OK;
+}
+
+/* TEST.STRING.PRINTF -- Test string formatting. */
+int TestStringPrintf(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+ if (argc < 3) {
+ return RedisModule_WrongArity(ctx);
+ }
+ RedisModuleString *s = RedisModule_CreateStringPrintf(ctx,
+ "Got %d args. argv[1]: %s, argv[2]: %s",
+ argc,
+ RedisModule_StringPtrLen(argv[1], NULL),
+ RedisModule_StringPtrLen(argv[2], NULL)
+ );
+
+ RedisModule_ReplyWithString(ctx,s);
+
+ return REDISMODULE_OK;
+}
+
+int failTest(RedisModuleCtx *ctx, const char *msg) {
+ RedisModule_ReplyWithError(ctx, msg);
+ return REDISMODULE_ERR;
+}
+
+int TestUnlink(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleKey *k = RedisModule_OpenKey(ctx, RedisModule_CreateStringPrintf(ctx, "unlinked"), REDISMODULE_WRITE | REDISMODULE_READ);
+ if (!k) return failTest(ctx, "Could not create key");
+
+ if (REDISMODULE_ERR == RedisModule_StringSet(k, RedisModule_CreateStringPrintf(ctx, "Foobar"))) {
+ return failTest(ctx, "Could not set string value");
+ }
+
+ RedisModuleCallReply *rep = RedisModule_Call(ctx, "EXISTS", "c", "unlinked");
+ if (!rep || RedisModule_CallReplyInteger(rep) != 1) {
+ return failTest(ctx, "Key does not exist before unlink");
+ }
+
+ if (REDISMODULE_ERR == RedisModule_UnlinkKey(k)) {
+ return failTest(ctx, "Could not unlink key");
+ }
+
+ rep = RedisModule_Call(ctx, "EXISTS", "c", "unlinked");
+ if (!rep || RedisModule_CallReplyInteger(rep) != 0) {
+ return failTest(ctx, "Could not verify key to be unlinked");
+ }
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+int TestNestedCallReplyArrayElement(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleString *expect_key = RedisModule_CreateString(ctx, "mykey", strlen("mykey"));
+ RedisModule_SelectDb(ctx, 1);
+ RedisModule_Call(ctx, "LPUSH", "sc", expect_key, "myvalue");
+
+ RedisModuleCallReply *scan_reply = RedisModule_Call(ctx, "SCAN", "l", (long long)0);
+ RedisModule_Assert(scan_reply != NULL && RedisModule_CallReplyType(scan_reply) == REDISMODULE_REPLY_ARRAY);
+ RedisModule_Assert(RedisModule_CallReplyLength(scan_reply) == 2);
+
+ long long scan_cursor;
+ RedisModuleCallReply *cursor_reply = RedisModule_CallReplyArrayElement(scan_reply, 0);
+ RedisModule_Assert(RedisModule_CallReplyType(cursor_reply) == REDISMODULE_REPLY_STRING);
+ RedisModule_Assert(RedisModule_StringToLongLong(RedisModule_CreateStringFromCallReply(cursor_reply), &scan_cursor) == REDISMODULE_OK);
+ RedisModule_Assert(scan_cursor == 0);
+
+ RedisModuleCallReply *keys_reply = RedisModule_CallReplyArrayElement(scan_reply, 1);
+ RedisModule_Assert(RedisModule_CallReplyType(keys_reply) == REDISMODULE_REPLY_ARRAY);
+    RedisModule_Assert(RedisModule_CallReplyLength(keys_reply) == 1);
+
+ RedisModuleCallReply *key_reply = RedisModule_CallReplyArrayElement(keys_reply, 0);
+ RedisModule_Assert(RedisModule_CallReplyType(key_reply) == REDISMODULE_REPLY_STRING);
+ RedisModuleString *key = RedisModule_CreateStringFromCallReply(key_reply);
+ RedisModule_Assert(RedisModule_StringCompare(key, expect_key) == 0);
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/* TEST.STRING.TRUNCATE -- Test truncating an existing string object. */
+int TestStringTruncate(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_Call(ctx, "SET", "cc", "foo", "abcde");
+ RedisModuleKey *k = RedisModule_OpenKey(ctx, RedisModule_CreateStringPrintf(ctx, "foo"), REDISMODULE_READ | REDISMODULE_WRITE);
+ if (!k) return failTest(ctx, "Could not create key");
+
+ size_t len = 0;
+ char* s;
+
+ /* expand from 5 to 8 and check null pad */
+ if (REDISMODULE_ERR == RedisModule_StringTruncate(k, 8)) {
+ return failTest(ctx, "Could not truncate string value (8)");
+ }
+ s = RedisModule_StringDMA(k, &len, REDISMODULE_READ);
+ if (!s) {
+ return failTest(ctx, "Failed to read truncated string (8)");
+ } else if (len != 8) {
+ return failTest(ctx, "Failed to expand string value (8)");
+ } else if (0 != strncmp(s, "abcde\0\0\0", 8)) {
+ return failTest(ctx, "Failed to null pad string value (8)");
+ }
+
+ /* shrink from 8 to 4 */
+ if (REDISMODULE_ERR == RedisModule_StringTruncate(k, 4)) {
+ return failTest(ctx, "Could not truncate string value (4)");
+ }
+ s = RedisModule_StringDMA(k, &len, REDISMODULE_READ);
+ if (!s) {
+ return failTest(ctx, "Failed to read truncated string (4)");
+ } else if (len != 4) {
+ return failTest(ctx, "Failed to shrink string value (4)");
+ } else if (0 != strncmp(s, "abcd", 4)) {
+ return failTest(ctx, "Failed to truncate string value (4)");
+ }
+
+ /* shrink to 0 */
+ if (REDISMODULE_ERR == RedisModule_StringTruncate(k, 0)) {
+ return failTest(ctx, "Could not truncate string value (0)");
+ }
+ s = RedisModule_StringDMA(k, &len, REDISMODULE_READ);
+ if (!s) {
+ return failTest(ctx, "Failed to read truncated string (0)");
+ } else if (len != 0) {
+ return failTest(ctx, "Failed to shrink string value to (0)");
+ }
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+int NotifyCallback(RedisModuleCtx *ctx, int type, const char *event,
+ RedisModuleString *key) {
+ RedisModule_AutoMemory(ctx);
+    /* For each notified key, increment a per-key counter in the
+     * "notifications" hash. */
+ RedisModule_Log(ctx, "notice", "Got event type %d, event %s, key %s", type,
+ event, RedisModule_StringPtrLen(key, NULL));
+
+ RedisModule_Call(ctx, "HINCRBY", "csc", "notifications", key, "1");
+ return REDISMODULE_OK;
+}
+
+/* TEST.NOTIFICATIONS -- Test Keyspace Notifications. */
+int TestNotifications(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+#define FAIL(msg, ...) \
+ { \
+ RedisModule_Log(ctx, "warning", "Failed NOTIFY Test. Reason: " #msg, ##__VA_ARGS__); \
+ goto err; \
+ }
+ RedisModule_Call(ctx, "FLUSHDB", "");
+
+ RedisModule_Call(ctx, "SET", "cc", "foo", "bar");
+ RedisModule_Call(ctx, "SET", "cc", "foo", "baz");
+ RedisModule_Call(ctx, "SADD", "cc", "bar", "x");
+ RedisModule_Call(ctx, "SADD", "cc", "bar", "y");
+
+ RedisModule_Call(ctx, "HSET", "ccc", "baz", "x", "y");
+ /* LPUSH should be ignored and not increment any counters */
+ RedisModule_Call(ctx, "LPUSH", "cc", "l", "y");
+ RedisModule_Call(ctx, "LPUSH", "cc", "l", "y");
+
+ /* Miss some keys intentionally so we will get a "keymiss" notification. */
+ RedisModule_Call(ctx, "GET", "c", "nosuchkey");
+ RedisModule_Call(ctx, "SMEMBERS", "c", "nosuchkey");
+
+ size_t sz;
+ const char *rep;
+ RedisModuleCallReply *r = RedisModule_Call(ctx, "HGET", "cc", "notifications", "foo");
+ if (r == NULL || RedisModule_CallReplyType(r) != REDISMODULE_REPLY_STRING) {
+ FAIL("Wrong or no reply for foo");
+ } else {
+ rep = RedisModule_CallReplyStringPtr(r, &sz);
+ if (sz != 1 || *rep != '2') {
+ FAIL("Got reply '%s'. expected '2'", RedisModule_CallReplyStringPtr(r, NULL));
+ }
+ }
+
+ r = RedisModule_Call(ctx, "HGET", "cc", "notifications", "bar");
+ if (r == NULL || RedisModule_CallReplyType(r) != REDISMODULE_REPLY_STRING) {
+ FAIL("Wrong or no reply for bar");
+ } else {
+ rep = RedisModule_CallReplyStringPtr(r, &sz);
+ if (sz != 1 || *rep != '2') {
+ FAIL("Got reply '%s'. expected '2'", rep);
+ }
+ }
+
+ r = RedisModule_Call(ctx, "HGET", "cc", "notifications", "baz");
+ if (r == NULL || RedisModule_CallReplyType(r) != REDISMODULE_REPLY_STRING) {
+ FAIL("Wrong or no reply for baz");
+ } else {
+ rep = RedisModule_CallReplyStringPtr(r, &sz);
+ if (sz != 1 || *rep != '1') {
+ FAIL("Got reply '%.*s'. expected '1'", (int)sz, rep);
+ }
+ }
+ /* For l we expect nothing since we didn't subscribe to list events */
+ r = RedisModule_Call(ctx, "HGET", "cc", "notifications", "l");
+ if (r == NULL || RedisModule_CallReplyType(r) != REDISMODULE_REPLY_NULL) {
+ FAIL("Wrong reply for l");
+ }
+
+ r = RedisModule_Call(ctx, "HGET", "cc", "notifications", "nosuchkey");
+ if (r == NULL || RedisModule_CallReplyType(r) != REDISMODULE_REPLY_STRING) {
+ FAIL("Wrong or no reply for nosuchkey");
+ } else {
+ rep = RedisModule_CallReplyStringPtr(r, &sz);
+ if (sz != 1 || *rep != '2') {
+ FAIL("Got reply '%.*s'. expected '2'", (int)sz, rep);
+ }
+ }
+
+ RedisModule_Call(ctx, "FLUSHDB", "");
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+err:
+ RedisModule_Call(ctx, "FLUSHDB", "");
+
+ return RedisModule_ReplyWithSimpleString(ctx, "ERR");
+}
+
+/* TEST.CTXFLAGS -- Test GetContextFlags. */
+int TestCtxFlags(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argc);
+ REDISMODULE_NOT_USED(argv);
+
+ RedisModule_AutoMemory(ctx);
+
+ int ok = 1;
+ const char *errString = NULL;
+#undef FAIL
+#define FAIL(msg) \
+ { \
+ ok = 0; \
+ errString = msg; \
+ goto end; \
+ }
+
+ int flags = RedisModule_GetContextFlags(ctx);
+ if (flags == 0) {
+ FAIL("Got no flags");
+ }
+
+ if (flags & REDISMODULE_CTX_FLAGS_LUA) FAIL("Lua flag was set");
+ if (flags & REDISMODULE_CTX_FLAGS_MULTI) FAIL("Multi flag was set");
+
+ if (flags & REDISMODULE_CTX_FLAGS_AOF) FAIL("AOF Flag was set")
+ /* Enable AOF to test AOF flags */
+ RedisModule_Call(ctx, "config", "ccc", "set", "appendonly", "yes");
+ flags = RedisModule_GetContextFlags(ctx);
+ if (!(flags & REDISMODULE_CTX_FLAGS_AOF)) FAIL("AOF Flag not set after config set");
+
+ /* Disable RDB saving and test the flag. */
+ RedisModule_Call(ctx, "config", "ccc", "set", "save", "");
+ flags = RedisModule_GetContextFlags(ctx);
+ if (flags & REDISMODULE_CTX_FLAGS_RDB) FAIL("RDB Flag was set");
+ /* Enable RDB to test RDB flags */
+ RedisModule_Call(ctx, "config", "ccc", "set", "save", "900 1");
+ flags = RedisModule_GetContextFlags(ctx);
+ if (!(flags & REDISMODULE_CTX_FLAGS_RDB)) FAIL("RDB Flag was not set after config set");
+
+ if (!(flags & REDISMODULE_CTX_FLAGS_MASTER)) FAIL("Master flag was not set");
+ if (flags & REDISMODULE_CTX_FLAGS_SLAVE) FAIL("Slave flag was set");
+ if (flags & REDISMODULE_CTX_FLAGS_READONLY) FAIL("Read-only flag was set");
+ if (flags & REDISMODULE_CTX_FLAGS_CLUSTER) FAIL("Cluster flag was set");
+
+    /* Disable maxmemory and test the flag (it is implicitly set in 32-bit builds). */
+ RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory", "0");
+ flags = RedisModule_GetContextFlags(ctx);
+ if (flags & REDISMODULE_CTX_FLAGS_MAXMEMORY) FAIL("Maxmemory flag was set");
+
+ /* Enable maxmemory and test the flag. */
+ RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory", "100000000");
+ flags = RedisModule_GetContextFlags(ctx);
+ if (!(flags & REDISMODULE_CTX_FLAGS_MAXMEMORY))
+ FAIL("Maxmemory flag was not set after config set");
+
+ if (flags & REDISMODULE_CTX_FLAGS_EVICT) FAIL("Eviction flag was set");
+ RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory-policy", "allkeys-lru");
+ flags = RedisModule_GetContextFlags(ctx);
+ if (!(flags & REDISMODULE_CTX_FLAGS_EVICT)) FAIL("Eviction flag was not set after config set");
+
+end:
+ /* Revert config changes */
+ RedisModule_Call(ctx, "config", "ccc", "set", "appendonly", "no");
+ RedisModule_Call(ctx, "config", "ccc", "set", "save", "");
+ RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory", "0");
+ RedisModule_Call(ctx, "config", "ccc", "set", "maxmemory-policy", "noeviction");
+
+ if (!ok) {
+ RedisModule_Log(ctx, "warning", "Failed CTXFLAGS Test. Reason: %s", errString);
+ return RedisModule_ReplyWithSimpleString(ctx, "ERR");
+ }
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+/* ----------------------------- Test framework ----------------------------- */
+
+/* Return 1 if the reply is an error matching the specified string, otherwise
+ * log the mismatch in the server log and return 0. */
+int TestAssertErrorReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, char *str, size_t len) {
+ RedisModuleString *mystr, *expected;
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ERROR) {
+ return 0;
+ }
+
+ mystr = RedisModule_CreateStringFromCallReply(reply);
+ expected = RedisModule_CreateString(ctx,str,len);
+ if (RedisModule_StringCompare(mystr,expected) != 0) {
+ const char *mystr_ptr = RedisModule_StringPtrLen(mystr,NULL);
+ const char *expected_ptr = RedisModule_StringPtrLen(expected,NULL);
+ RedisModule_Log(ctx,"warning",
+            "Unexpected error reply '%s' (instead of '%s')",
+ mystr_ptr, expected_ptr);
+ return 0;
+ }
+ return 1;
+}
+
+int TestAssertStringReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, char *str, size_t len) {
+ RedisModuleString *mystr, *expected;
+
+ if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_ERROR) {
+ RedisModule_Log(ctx,"warning","Test error reply: %s",
+ RedisModule_CallReplyStringPtr(reply, NULL));
+ return 0;
+ } else if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_STRING) {
+ RedisModule_Log(ctx,"warning","Unexpected reply type %d",
+ RedisModule_CallReplyType(reply));
+ return 0;
+ }
+ mystr = RedisModule_CreateStringFromCallReply(reply);
+ expected = RedisModule_CreateString(ctx,str,len);
+ if (RedisModule_StringCompare(mystr,expected) != 0) {
+ const char *mystr_ptr = RedisModule_StringPtrLen(mystr,NULL);
+ const char *expected_ptr = RedisModule_StringPtrLen(expected,NULL);
+ RedisModule_Log(ctx,"warning",
+ "Unexpected string reply '%s' (instead of '%s')",
+ mystr_ptr, expected_ptr);
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 1 if the reply matches the specified integer, otherwise log errors
+ * in the server log and return 0. */
+int TestAssertIntegerReply(RedisModuleCtx *ctx, RedisModuleCallReply *reply, long long expected) {
+ if (RedisModule_CallReplyType(reply) == REDISMODULE_REPLY_ERROR) {
+ RedisModule_Log(ctx,"warning","Test error reply: %s",
+ RedisModule_CallReplyStringPtr(reply, NULL));
+ return 0;
+ } else if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_INTEGER) {
+ RedisModule_Log(ctx,"warning","Unexpected reply type %d",
+ RedisModule_CallReplyType(reply));
+ return 0;
+ }
+ long long val = RedisModule_CallReplyInteger(reply);
+ if (val != expected) {
+ RedisModule_Log(ctx,"warning",
+ "Unexpected integer reply '%lld' (instead of '%lld')",
+ val, expected);
+ return 0;
+ }
+ return 1;
+}
+
+#define T(name,...) \
+ do { \
+ RedisModule_Log(ctx,"warning","Testing %s", name); \
+ reply = RedisModule_Call(ctx,name,__VA_ARGS__); \
+ } while (0)
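+
+/* For example, T("ping","") expands (roughly) to:
+ *
+ *   RedisModule_Log(ctx,"warning","Testing %s","ping");
+ *   reply = RedisModule_Call(ctx,"ping","");
+ */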
+
+/* TEST.BASICS -- Run all the tests.
+ * Note: it is useful to run these tests from the module rather than TCL
+ * since it's easier to check the reply types like that (make a distinction
+ * between 0 and "0", etc.). */
+int TestBasics(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleCallReply *reply;
+
+    /* Make sure the DB is empty before proceeding. */
+ T("dbsize","");
+ if (!TestAssertIntegerReply(ctx,reply,0)) goto fail;
+
+ T("ping","");
+ if (!TestAssertStringReply(ctx,reply,"PONG",4)) goto fail;
+
+ T("test.call","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callresp3map","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callresp3set","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callresp3double","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callresp3bool","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callresp3null","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callreplywithnestedreply","");
+ if (!TestAssertStringReply(ctx,reply,"test",4)) goto fail;
+
+ T("test.callreplywithbignumberreply","");
+ if (!TestAssertStringReply(ctx,reply,"1234567999999999999999999999999999999",37)) goto fail;
+
+ T("test.callreplywithverbatimstringreply","");
+ if (!TestAssertStringReply(ctx,reply,"txt:This is a verbatim\nstring",29)) goto fail;
+
+ T("test.ctxflags","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.string.append","");
+ if (!TestAssertStringReply(ctx,reply,"foobar",6)) goto fail;
+
+ T("test.string.truncate","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.unlink","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.nestedcallreplyarray","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.string.append.am","");
+ if (!TestAssertStringReply(ctx,reply,"foobar",6)) goto fail;
+
+ T("test.string.trim","");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.string.printf", "cc", "foo", "bar");
+ if (!TestAssertStringReply(ctx,reply,"Got 3 args. argv[1]: foo, argv[2]: bar",38)) goto fail;
+
+ T("test.notify", "");
+ if (!TestAssertStringReply(ctx,reply,"OK",2)) goto fail;
+
+ T("test.callreplywitharrayreply", "");
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_ARRAY) goto fail;
+ if (RedisModule_CallReplyLength(reply) != 2) goto fail;
+ if (!TestAssertStringReply(ctx,RedisModule_CallReplyArrayElement(reply, 0),"test",4)) goto fail;
+ if (!TestAssertStringReply(ctx,RedisModule_CallReplyArrayElement(reply, 1),"1234",4)) goto fail;
+
+ T("foo", "E");
+ if (!TestAssertErrorReply(ctx,reply,"ERR unknown command 'foo', with args beginning with: ",53)) goto fail;
+
+ T("set", "Ec", "x");
+ if (!TestAssertErrorReply(ctx,reply,"ERR wrong number of arguments for 'set' command",47)) goto fail;
+
+ T("shutdown", "SE");
+ if (!TestAssertErrorReply(ctx,reply,"ERR command 'shutdown' is not allowed on script mode",52)) goto fail;
+
+ T("set", "WEcc", "x", "1");
+ if (!TestAssertErrorReply(ctx,reply,"ERR Write command 'set' was called while write is not allowed.",62)) goto fail;
+
+ RedisModule_ReplyWithSimpleString(ctx,"ALL TESTS PASSED");
+ return REDISMODULE_OK;
+
+fail:
+ RedisModule_ReplyWithSimpleString(ctx,
+ "SOME TEST DID NOT PASS! Check server logs");
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"test",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+    /* Perform RM_Call inside RedisModule_OnLoad
+     * to verify that it works as expected without crashing.
+     * The tests will verify it under different configuration
+     * options (cluster/no cluster). A simple ping command
+     * is enough for this test. */
+ RedisModuleCallReply *reply = RedisModule_Call(ctx, "ping", "");
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_STRING) {
+ RedisModule_FreeCallReply(reply);
+ return REDISMODULE_ERR;
+ }
+ size_t len;
+ const char *reply_str = RedisModule_CallReplyStringPtr(reply, &len);
+ if (len != 4) {
+ RedisModule_FreeCallReply(reply);
+ return REDISMODULE_ERR;
+ }
+ if (memcmp(reply_str, "PONG", 4) != 0) {
+ RedisModule_FreeCallReply(reply);
+ return REDISMODULE_ERR;
+ }
+ RedisModule_FreeCallReply(reply);
+
+ if (RedisModule_CreateCommand(ctx,"test.call",
+ TestCall,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callresp3map",
+ TestCallResp3Map,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callresp3attribute",
+ TestCallResp3Attribute,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callresp3set",
+ TestCallResp3Set,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callresp3double",
+ TestCallResp3Double,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callresp3bool",
+ TestCallResp3Bool,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callresp3null",
+ TestCallResp3Null,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callreplywitharrayreply",
+ TestCallReplyWithArrayReply,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callreplywithnestedreply",
+ TestCallReplyWithNestedReply,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callreplywithbignumberreply",
+ TestCallResp3BigNumber,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.callreplywithverbatimstringreply",
+ TestCallResp3Verbatim,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.string.append",
+ TestStringAppend,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.string.trim",
+ TestTrimString,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.string.append.am",
+ TestStringAppendAM,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.string.truncate",
+ TestStringTruncate,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.string.printf",
+ TestStringPrintf,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.ctxflags",
+ TestCtxFlags,"readonly",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.unlink",
+ TestUnlink,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.nestedcallreplyarray",
+ TestNestedCallReplyArrayElement,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.basics",
+ TestBasics,"write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+    /* The following commands are used by an external test and should not be added to TestBasics. */
+ if (RedisModule_CreateCommand(ctx,"test.rmcallautomode",
+ TestCallRespAutoMode,"write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.getresp",
+ TestGetResp,"readonly",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
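+    /* Subscribe to hash, set and string events plus key misses; the
+     * test.notify command registered below exercises these notifications. */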
+ RedisModule_SubscribeToKeyspaceEvents(ctx,
+ REDISMODULE_NOTIFY_HASH |
+ REDISMODULE_NOTIFY_SET |
+ REDISMODULE_NOTIFY_STRING |
+ REDISMODULE_NOTIFY_KEY_MISS,
+ NotifyCallback);
+ if (RedisModule_CreateCommand(ctx,"test.notify",
+ TestNotifications,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/blockedclient.c b/tests/modules/blockedclient.c
new file mode 100644
index 0000000..92060fd
--- /dev/null
+++ b/tests/modules/blockedclient.c
@@ -0,0 +1,712 @@
+/* define macros for having usleep */
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE
+#include <unistd.h>
+
+#include "redismodule.h"
+#include <assert.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <strings.h>
+
+#define UNUSED(V) ((void) V)
+
+/* used to test processing events during slow bg operation */
+static volatile int g_slow_bg_operation = 0;
+static volatile int g_is_in_slow_bg_operation = 0;
+
+void *sub_worker(void *arg) {
+ // Get Redis module context
+ RedisModuleCtx *ctx = (RedisModuleCtx *)arg;
+
+ // Try acquiring GIL
+ int res = RedisModule_ThreadSafeContextTryLock(ctx);
+
+    // The GIL is already held by the calling thread, so this is expected to fail.
+ assert(res != REDISMODULE_OK);
+
+ return NULL;
+}
+
+void *worker(void *arg) {
+ // Retrieve blocked client
+ RedisModuleBlockedClient *bc = (RedisModuleBlockedClient *)arg;
+
+ // Get Redis module context
+ RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bc);
+
+ // Acquire GIL
+ RedisModule_ThreadSafeContextLock(ctx);
+
+ // Create another thread which will try to acquire the GIL
+ pthread_t tid;
+ int res = pthread_create(&tid, NULL, sub_worker, ctx);
+ assert(res == 0);
+
+ // Wait for thread
+ pthread_join(tid, NULL);
+
+ // Release GIL
+ RedisModule_ThreadSafeContextUnlock(ctx);
+
+ // Reply to client
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+ // Unblock client
+ RedisModule_UnblockClient(bc, NULL);
+
+ // Free the Redis module context
+ RedisModule_FreeThreadSafeContext(ctx);
+
+ return NULL;
+}
+
+int acquire_gil(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ UNUSED(argv);
+ UNUSED(argc);
+
+ int flags = RedisModule_GetContextFlags(ctx);
+ int allFlags = RedisModule_GetContextFlagsAll();
+ if ((allFlags & REDISMODULE_CTX_FLAGS_MULTI) &&
+ (flags & REDISMODULE_CTX_FLAGS_MULTI)) {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked client is not supported inside multi");
+ return REDISMODULE_OK;
+ }
+
+ if ((allFlags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING) &&
+ (flags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING)) {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked client is not allowed");
+ return REDISMODULE_OK;
+ }
+
+    /* This command handler tries to acquire the GIL twice:
+     * once in the worker thread using "RedisModule_ThreadSafeContextLock",
+     * and a second time in the sub-worker thread using
+     * "RedisModule_ThreadSafeContextTryLock", which fails
+     * since the GIL is already locked. */
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+
+ pthread_t tid;
+ int res = pthread_create(&tid, NULL, worker, bc);
+ assert(res == 0);
+
+ return REDISMODULE_OK;
+}
+
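+/* Arguments handed from do_bg_rm_call to the background worker thread. */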
+typedef struct {
+ RedisModuleString **argv;
+ int argc;
+ RedisModuleBlockedClient *bc;
+} bg_call_data;
+
+void *bg_call_worker(void *arg) {
+ bg_call_data *bg = arg;
+
+ // Get Redis module context
+ RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bg->bc);
+
+ // Acquire GIL
+ RedisModule_ThreadSafeContextLock(ctx);
+
+ // Test slow operation yielding
+ if (g_slow_bg_operation) {
+ g_is_in_slow_bg_operation = 1;
+ while (g_slow_bg_operation) {
+ RedisModule_Yield(ctx, REDISMODULE_YIELD_FLAG_CLIENTS, "Slow module operation");
+ usleep(1000);
+ }
+ g_is_in_slow_bg_operation = 0;
+ }
+
+ // Call the command
+ const char *module_cmd = RedisModule_StringPtrLen(bg->argv[0], NULL);
+ int cmd_pos = 1;
+ RedisModuleString *format_redis_str = RedisModule_CreateString(NULL, "v", 1);
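+    /* The base format "v" passes the remaining arguments as a vector of
+     * RedisModuleString pointers; for do_bg_rm_call_format the caller's
+     * format is appended, plus "E" so errors come back as call replies. */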
+ if (!strcasecmp(module_cmd, "do_bg_rm_call_format")) {
+ cmd_pos = 2;
+ size_t format_len;
+ const char *format = RedisModule_StringPtrLen(bg->argv[1], &format_len);
+ RedisModule_StringAppendBuffer(NULL, format_redis_str, format, format_len);
+ RedisModule_StringAppendBuffer(NULL, format_redis_str, "E", 1);
+ }
+ const char *format = RedisModule_StringPtrLen(format_redis_str, NULL);
+ const char *cmd = RedisModule_StringPtrLen(bg->argv[cmd_pos], NULL);
+ RedisModuleCallReply *rep = RedisModule_Call(ctx, cmd, format, bg->argv + cmd_pos + 1, bg->argc - cmd_pos - 1);
+ RedisModule_FreeString(NULL, format_redis_str);
+
+ // Release GIL
+ RedisModule_ThreadSafeContextUnlock(ctx);
+
+ // Reply to client
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ // Unblock client
+ RedisModule_UnblockClient(bg->bc, NULL);
+
+ /* Free the arguments */
+ for (int i=0; i<bg->argc; i++)
+ RedisModule_FreeString(ctx, bg->argv[i]);
+ RedisModule_Free(bg->argv);
+ RedisModule_Free(bg);
+
+ // Free the Redis module context
+ RedisModule_FreeThreadSafeContext(ctx);
+
+ return NULL;
+}
+
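+/* do_bg_rm_call / do_bg_rm_call_format -- run the given command from a
+ * background thread (bg_call_worker) while the client is blocked. */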
+int do_bg_rm_call(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ UNUSED(argv);
+ UNUSED(argc);
+
+ /* Make sure we're not trying to block a client when we shouldn't */
+ int flags = RedisModule_GetContextFlags(ctx);
+ int allFlags = RedisModule_GetContextFlagsAll();
+ if ((allFlags & REDISMODULE_CTX_FLAGS_MULTI) &&
+ (flags & REDISMODULE_CTX_FLAGS_MULTI)) {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked client is not supported inside multi");
+ return REDISMODULE_OK;
+ }
+ if ((allFlags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING) &&
+ (flags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING)) {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked client is not allowed");
+ return REDISMODULE_OK;
+ }
+
+ /* Make a copy of the arguments and pass them to the thread. */
+ bg_call_data *bg = RedisModule_Alloc(sizeof(bg_call_data));
+ bg->argv = RedisModule_Alloc(sizeof(RedisModuleString*)*argc);
+ bg->argc = argc;
+ for (int i=0; i<argc; i++)
+ bg->argv[i] = RedisModule_HoldString(ctx, argv[i]);
+
+ /* Block the client */
+ bg->bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+
+ /* Start a thread to handle the request */
+ pthread_t tid;
+ int res = pthread_create(&tid, NULL, bg_call_worker, bg);
+ assert(res == 0);
+
+ return REDISMODULE_OK;
+}
+
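+/* do_rm_call <cmd> [arg ...] -- invoke <cmd> with the given arguments via
+ * RM_Call and forward its reply to the client. */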
+int do_rm_call(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
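+    /* "E" returns errors as call replies instead of NULL, and "v" passes
+     * the remaining argv array through verbatim. */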
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "Ev", argv + 2, argc - 2);
+ if(!rep){
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ }else{
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ return REDISMODULE_OK;
+}
+
+static void rm_call_async_send_reply(RedisModuleCtx *ctx, RedisModuleCallReply *reply) {
+ RedisModule_ReplyWithCallReply(ctx, reply);
+ RedisModule_FreeCallReply(reply);
+}
+
+/* Called when the command that was blocked on 'RM_Call' gets unblocked
+ * and sends the reply to the blocked client. */
+static void rm_call_async_on_unblocked(RedisModuleCtx *ctx, RedisModuleCallReply *reply, void *private_data) {
+ UNUSED(ctx);
+ RedisModuleBlockedClient *bc = private_data;
+ RedisModuleCtx *bctx = RedisModule_GetThreadSafeContext(bc);
+ rm_call_async_send_reply(bctx, reply);
+ RedisModule_FreeThreadSafeContext(bctx);
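+    /* Hand the stored private data back to UnblockClient so the
+     * free-privdata callback (if one was registered) can release it. */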
+ RedisModule_UnblockClient(bc, RedisModule_BlockClientGetPrivateData(bc));
+}
+
+int do_rm_call_async_fire_and_forget(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
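+    /* "!" replicates the command, "K" allows it to block (yielding a
+     * promise reply), "E" returns errors as call replies, "v" passes argv
+     * as a vector. */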
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "!KEv", argv + 2, argc - 2);
+
+ if(RedisModule_CallReplyType(rep) != REDISMODULE_REPLY_PROMISE) {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ } else {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked");
+ }
+ RedisModule_FreeCallReply(rep);
+
+ return REDISMODULE_OK;
+}
+
+static void do_rm_call_async_free_pd(RedisModuleCtx * ctx, void *pd) {
+ UNUSED(ctx);
+ RedisModule_FreeCallReply(pd);
+}
+
+static void do_rm_call_async_disconnect(RedisModuleCtx *ctx, struct RedisModuleBlockedClient *bc) {
+ UNUSED(ctx);
+ RedisModuleCallReply* rep = RedisModule_BlockClientGetPrivateData(bc);
+ RedisModule_CallReplyPromiseAbort(rep, NULL);
+ RedisModule_FreeCallReply(rep);
+ RedisModule_AbortBlock(bc);
+}
+
+/*
+ * Callback for do_rm_call_async / do_rm_call_async_script_mode.
+ * Gets the command to invoke as its first argument and runs it,
+ * passing the rest of the arguments to the command invocation.
+ * If the command got blocked, blocks the client and unblocks it when the
+ * command gets unblocked; this allows checking the K (allow blocking)
+ * argument to RM_Call.
+ */
+int do_rm_call_async(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ size_t format_len = 0;
+ char format[6] = {0};
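+    /* The format is built dynamically; at most "KSEv!" plus the NUL
+     * terminator fits in the 6-byte buffer. */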
+
+ if (!(RedisModule_GetContextFlags(ctx) & REDISMODULE_CTX_FLAGS_DENY_BLOCKING)) {
+ /* We are allowed to block the client so we can allow RM_Call to also block us */
+ format[format_len++] = 'K';
+ }
+
+ const char* invoked_cmd = RedisModule_StringPtrLen(argv[0], NULL);
+ if (strcasecmp(invoked_cmd, "do_rm_call_async_script_mode") == 0) {
+ format[format_len++] = 'S';
+ }
+
+ format[format_len++] = 'E';
+ format[format_len++] = 'v';
+ if (strcasecmp(invoked_cmd, "do_rm_call_async_no_replicate") != 0) {
+ /* Notice, without the '!' flag we will have inconsistency between master and replica.
+ * This is used only to check '!' flag correctness on blocked commands. */
+ format[format_len++] = '!';
+ }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, format, argv + 2, argc - 2);
+
+ if(RedisModule_CallReplyType(rep) != REDISMODULE_REPLY_PROMISE) {
+ rm_call_async_send_reply(ctx, rep);
+ } else {
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, do_rm_call_async_free_pd, 0);
+ RedisModule_SetDisconnectCallback(bc, do_rm_call_async_disconnect);
+ RedisModule_BlockClientSetPrivateData(bc, rep);
+ RedisModule_CallReplyPromiseSetUnblockHandler(rep, rm_call_async_on_unblocked, bc);
+ }
+
+ return REDISMODULE_OK;
+}
+
+typedef struct ThreadedAsyncRMCallCtx{
+ RedisModuleBlockedClient *bc;
+ RedisModuleCallReply *reply;
+} ThreadedAsyncRMCallCtx;
+
+void *send_async_reply(void *arg) {
+ ThreadedAsyncRMCallCtx *ta_rm_call_ctx = arg;
+ rm_call_async_on_unblocked(NULL, ta_rm_call_ctx->reply, ta_rm_call_ctx->bc);
+ RedisModule_Free(ta_rm_call_ctx);
+ return NULL;
+}
+
+/* Called when the command that was blocked on 'RM_Call' gets unblocked
+ * and schedules a thread to send the reply to the blocked client. */
+static void rm_call_async_reply_on_thread(RedisModuleCtx *ctx, RedisModuleCallReply *reply, void *private_data) {
+ UNUSED(ctx);
+ ThreadedAsyncRMCallCtx *ta_rm_call_ctx = RedisModule_Alloc(sizeof(*ta_rm_call_ctx));
+ ta_rm_call_ctx->bc = private_data;
+ ta_rm_call_ctx->reply = reply;
+ pthread_t tid;
+ int res = pthread_create(&tid, NULL, send_async_reply, ta_rm_call_ctx);
+ assert(res == 0);
+}
+
+/*
+ * Callback for do_rm_call_async_on_thread.
+ * Gets the command to invoke as its first argument and runs it,
+ * passing the rest of the arguments to the command invocation.
+ * If the command got blocked, blocks the client and unblocks it on a
+ * background thread. This allows checking the K (allow blocking) argument
+ * to RM_Call, and makes sure that the reply passed to the unblock handler
+ * is owned by the handler and is not attached to any context that might be
+ * freed after the callback ends.
+ */
+int do_rm_call_async_on_thread(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "KEv", argv + 2, argc - 2);
+
+ if(RedisModule_CallReplyType(rep) != REDISMODULE_REPLY_PROMISE) {
+ rm_call_async_send_reply(ctx, rep);
+ } else {
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ RedisModule_CallReplyPromiseSetUnblockHandler(rep, rm_call_async_reply_on_thread, bc);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ return REDISMODULE_OK;
+}
+
+/* Private data for wait_and_do_rm_call_async that holds information about:
+ * 1. the blocked client, to unblock when done.
+ * 2. the arguments, containing the command to run using RM_Call. */
+typedef struct WaitAndDoRMCallCtx {
+ RedisModuleBlockedClient *bc;
+ RedisModuleString **argv;
+ int argc;
+} WaitAndDoRMCallCtx;
+
+/*
+ * This callback is called when the 'wait' command invoked by
+ * 'wait_and_do_rm_call_async' finishes. It continues the execution flow
+ * just like the 'do_rm_call_async' command.
+ */
+static void wait_and_do_rm_call_async_on_unblocked(RedisModuleCtx *ctx, RedisModuleCallReply *reply, void *private_data) {
+ WaitAndDoRMCallCtx *wctx = private_data;
+ if (RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_INTEGER) {
+ goto done;
+ }
+
+ if (RedisModule_CallReplyInteger(reply) != 1) {
+ goto done;
+ }
+
+ RedisModule_FreeCallReply(reply);
+ reply = NULL;
+
+ const char* cmd = RedisModule_StringPtrLen(wctx->argv[0], NULL);
+ reply = RedisModule_Call(ctx, cmd, "!EKv", wctx->argv + 1, wctx->argc - 1);
+
+done:
+ if(RedisModule_CallReplyType(reply) != REDISMODULE_REPLY_PROMISE) {
+ RedisModuleCtx *bctx = RedisModule_GetThreadSafeContext(wctx->bc);
+ rm_call_async_send_reply(bctx, reply);
+ RedisModule_FreeThreadSafeContext(bctx);
+ RedisModule_UnblockClient(wctx->bc, NULL);
+ } else {
+ RedisModule_CallReplyPromiseSetUnblockHandler(reply, rm_call_async_on_unblocked, wctx->bc);
+ RedisModule_FreeCallReply(reply);
+ }
+ for (int i = 0 ; i < wctx->argc ; ++i) {
+ RedisModule_FreeString(NULL, wctx->argv[i]);
+ }
+ RedisModule_Free(wctx->argv);
+ RedisModule_Free(wctx);
+}
+
+/*
+ * Callback for wait_and_do_rm_call.
+ * Gets the command to invoke as the first argument and runs the 'wait'
+ * command (using the K flag to RM_Call). Once the wait finishes, it runs
+ * the command that was given (just like 'do_rm_call_async').
+ */
+int wait_and_do_rm_call_async(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ int flags = RedisModule_GetContextFlags(ctx);
+ if (flags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING) {
+ return RedisModule_ReplyWithError(ctx, "Err can not run wait, blocking is not allowed.");
+ }
+
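+    /* WAIT 1 0 -- wait for one replica with no timeout; "K" lets the call
+     * block and hand back a promise reply. */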
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "wait", "!EKcc", "1", "0");
+ if(RedisModule_CallReplyType(rep) != REDISMODULE_REPLY_PROMISE) {
+ rm_call_async_send_reply(ctx, rep);
+ } else {
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ WaitAndDoRMCallCtx *wctx = RedisModule_Alloc(sizeof(*wctx));
+ *wctx = (WaitAndDoRMCallCtx){
+ .bc = bc,
+ .argv = RedisModule_Alloc((argc - 1) * sizeof(RedisModuleString*)),
+ .argc = argc - 1,
+ };
+
+ for (int i = 1 ; i < argc ; ++i) {
+ wctx->argv[i - 1] = RedisModule_HoldString(NULL, argv[i]);
+ }
+ RedisModule_CallReplyPromiseSetUnblockHandler(rep, wait_and_do_rm_call_async_on_unblocked, wctx);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ return REDISMODULE_OK;
+}
+
+static void blpop_and_set_multiple_keys_on_unblocked(RedisModuleCtx *ctx, RedisModuleCallReply *reply, void *private_data) {
+ /* ignore the reply */
+ RedisModule_FreeCallReply(reply);
+ WaitAndDoRMCallCtx *wctx = private_data;
+ for (int i = 0 ; i < wctx->argc ; i += 2) {
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "set", "!ss", wctx->argv[i], wctx->argv[i + 1]);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ RedisModuleCtx *bctx = RedisModule_GetThreadSafeContext(wctx->bc);
+ RedisModule_ReplyWithSimpleString(bctx, "OK");
+ RedisModule_FreeThreadSafeContext(bctx);
+ RedisModule_UnblockClient(wctx->bc, NULL);
+
+ for (int i = 0 ; i < wctx->argc ; ++i) {
+ RedisModule_FreeString(NULL, wctx->argv[i]);
+ }
+ RedisModule_Free(wctx->argv);
+ RedisModule_Free(wctx);
+}
+
+/*
+ * Performs a blpop command on a given list and, when unblocked, sets multiple string keys.
+ * This command allows checking that the unblock callback is performed as a unit
+ * and that its effects are replicated to the replica and the AOF wrapped in MULTI/EXEC.
+ */
+int blpop_and_set_multiple_keys(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if(argc < 2 || argc % 2 != 0){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ int flags = RedisModule_GetContextFlags(ctx);
+ if (flags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING) {
+ return RedisModule_ReplyWithError(ctx, "Err can not run wait, blocking is not allowed.");
+ }
+
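+    /* BLPOP <key> 0 -- block on the list with no timeout; "s" passes the
+     * key as a RedisModuleString and "c" the C-string timeout. */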
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "blpop", "!EKsc", argv[1], "0");
+ if(RedisModule_CallReplyType(rep) != REDISMODULE_REPLY_PROMISE) {
+ rm_call_async_send_reply(ctx, rep);
+ } else {
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ WaitAndDoRMCallCtx *wctx = RedisModule_Alloc(sizeof(*wctx));
+ *wctx = (WaitAndDoRMCallCtx){
+ .bc = bc,
+ .argv = RedisModule_Alloc((argc - 2) * sizeof(RedisModuleString*)),
+ .argc = argc - 2,
+ };
+
+ for (int i = 0 ; i < argc - 2 ; ++i) {
+ wctx->argv[i] = RedisModule_HoldString(NULL, argv[i + 2]);
+ }
+ RedisModule_CallReplyPromiseSetUnblockHandler(rep, blpop_and_set_multiple_keys_on_unblocked, wctx);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ return REDISMODULE_OK;
+}
+
+/* simulate a blocked client replying to a thread safe context without creating a thread */
+int do_fake_bg_true(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ RedisModuleCtx *bctx = RedisModule_GetThreadSafeContext(bc);
+
+ RedisModule_ReplyWithBool(bctx, 1);
+
+ RedisModule_FreeThreadSafeContext(bctx);
+ RedisModule_UnblockClient(bc, NULL);
+
+ return REDISMODULE_OK;
+}
+
+/* This flag is used by busy commands that might take a while, providing
+ * the ability to stop the busy work with a different command. */
+static volatile int abort_flag = 0;
+
+int slow_fg_command(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ long long block_time = 0;
+ if (RedisModule_StringToLongLong(argv[1], &block_time) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid integer value");
+ return REDISMODULE_OK;
+ }
+
+ uint64_t start_time = RedisModule_MonotonicMicroseconds();
+ /* when not blocking indefinitely, we don't process client commands in this test. */
+ int yield_flags = block_time? REDISMODULE_YIELD_FLAG_NONE: REDISMODULE_YIELD_FLAG_CLIENTS;
+ while (!abort_flag) {
+ RedisModule_Yield(ctx, yield_flags, "Slow module operation");
+ usleep(1000);
+ if (block_time && RedisModule_MonotonicMicroseconds() - start_time > (uint64_t)block_time)
+ break;
+ }
+
+ abort_flag = 0;
+ RedisModule_ReplyWithLongLong(ctx, 1);
+ return REDISMODULE_OK;
+}
+
+int stop_slow_fg_command(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ abort_flag = 1;
+ RedisModule_ReplyWithLongLong(ctx, 1);
+ return REDISMODULE_OK;
+}
+
+/* used to enable or disable slow operation in do_bg_rm_call */
+static int set_slow_bg_operation(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ long long ll;
+ if (RedisModule_StringToLongLong(argv[1], &ll) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid integer value");
+ return REDISMODULE_OK;
+ }
+ g_slow_bg_operation = ll;
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/* used to test if we reached the slow operation in do_bg_rm_call */
+static int is_in_slow_bg_operation(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ if (argc != 1) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithLongLong(ctx, g_is_in_slow_bg_operation);
+ return REDISMODULE_OK;
+}
+
+static void timer_callback(RedisModuleCtx *ctx, void *data)
+{
+ UNUSED(ctx);
+
+ RedisModuleBlockedClient *bc = data;
+
+ // Get Redis module context
+ RedisModuleCtx *reply_ctx = RedisModule_GetThreadSafeContext(bc);
+
+ // Reply to client
+ RedisModule_ReplyWithSimpleString(reply_ctx, "OK");
+
+ // Unblock client
+ RedisModule_UnblockClient(bc, NULL);
+
+ // Free the Redis module context
+ RedisModule_FreeThreadSafeContext(reply_ctx);
+}
+
+int unblock_by_timer(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ long long period;
+ if (RedisModule_StringToLongLong(argv[1],&period) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid period");
+
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ RedisModule_CreateTimer(ctx, period, timer_callback, bc);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+    if (RedisModule_Init(ctx, "blockedclient", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "acquire_gil", acquire_gil, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_rm_call", do_rm_call,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_rm_call_async", do_rm_call_async,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_rm_call_async_on_thread", do_rm_call_async_on_thread,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_rm_call_async_script_mode", do_rm_call_async,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_rm_call_async_no_replicate", do_rm_call_async,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_rm_call_fire_and_forget", do_rm_call_async_fire_and_forget,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "wait_and_do_rm_call", wait_and_do_rm_call_async,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "blpop_and_set_multiple_keys", blpop_and_set_multiple_keys,
+ "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_bg_rm_call", do_bg_rm_call, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_bg_rm_call_format", do_bg_rm_call, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "do_fake_bg_true", do_fake_bg_true, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "slow_fg_command", slow_fg_command,"", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "stop_slow_fg_command", stop_slow_fg_command,"allow-busy", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "set_slow_bg_operation", set_slow_bg_operation, "allow-busy", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "is_in_slow_bg_operation", is_in_slow_bg_operation, "allow-busy", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "unblock_by_timer", unblock_by_timer, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/blockonbackground.c b/tests/modules/blockonbackground.c
new file mode 100644
index 0000000..2e3b1a5
--- /dev/null
+++ b/tests/modules/blockonbackground.c
@@ -0,0 +1,295 @@
+#define _XOPEN_SOURCE 700
+#include "redismodule.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <time.h>
+
+#define UNUSED(x) (void)(x)
+
+/* Reply callback for blocking command BLOCK.DEBUG */
+int HelloBlock_Reply(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ int *myint = RedisModule_GetBlockedClientPrivateData(ctx);
+ return RedisModule_ReplyWithLongLong(ctx,*myint);
+}
+
+/* Timeout callback for blocking command BLOCK.DEBUG */
+int HelloBlock_Timeout(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ RedisModuleBlockedClient *bc = RedisModule_GetBlockedClientHandle(ctx);
+ RedisModule_BlockedClientMeasureTimeEnd(bc);
+ return RedisModule_ReplyWithSimpleString(ctx,"Request timedout");
+}
+
+/* Private data freeing callback for BLOCK.DEBUG command. */
+void HelloBlock_FreeData(RedisModuleCtx *ctx, void *privdata) {
+ UNUSED(ctx);
+ RedisModule_Free(privdata);
+}
+
+/* Private data freeing callback for BLOCK.BLOCK command. */
+void HelloBlock_FreeStringData(RedisModuleCtx *ctx, void *privdata) {
+ RedisModule_FreeString(ctx, (RedisModuleString*)privdata);
+}
+
+/* The thread entry point that actually executes the blocking part
+ * of the command BLOCK.DEBUG. */
+void *BlockDebug_ThreadMain(void *arg) {
+ void **targ = arg;
+ RedisModuleBlockedClient *bc = targ[0];
+ long long delay = (unsigned long)targ[1];
+ long long enable_time_track = (unsigned long)targ[2];
+ if (enable_time_track)
+ RedisModule_BlockedClientMeasureTimeStart(bc);
+ RedisModule_Free(targ);
+
+ struct timespec ts;
+ ts.tv_sec = delay / 1000;
+ ts.tv_nsec = (delay % 1000) * 1000000;
+ nanosleep(&ts, NULL);
+ int *r = RedisModule_Alloc(sizeof(int));
+ *r = rand();
+ if (enable_time_track)
+ RedisModule_BlockedClientMeasureTimeEnd(bc);
+ RedisModule_UnblockClient(bc,r);
+ return NULL;
+}
+
+/* The thread entry point that actually executes the blocking part
+ * of the command BLOCK.DOUBLE_DEBUG. */
+void *DoubleBlock_ThreadMain(void *arg) {
+ void **targ = arg;
+ RedisModuleBlockedClient *bc = targ[0];
+ long long delay = (unsigned long)targ[1];
+ RedisModule_BlockedClientMeasureTimeStart(bc);
+ RedisModule_Free(targ);
+ struct timespec ts;
+ ts.tv_sec = delay / 1000;
+ ts.tv_nsec = (delay % 1000) * 1000000;
+ nanosleep(&ts, NULL);
+ int *r = RedisModule_Alloc(sizeof(int));
+ *r = rand();
+ RedisModule_BlockedClientMeasureTimeEnd(bc);
+    /* Call RedisModule_BlockedClientMeasureTimeStart() and
+     * RedisModule_BlockedClientMeasureTimeEnd() again and ensure that the
+     * total execution time is 2x the delay. */
+ RedisModule_BlockedClientMeasureTimeStart(bc);
+ nanosleep(&ts, NULL);
+ RedisModule_BlockedClientMeasureTimeEnd(bc);
+
+ RedisModule_UnblockClient(bc,r);
+ return NULL;
+}
+
+void HelloBlock_Disconnected(RedisModuleCtx *ctx, RedisModuleBlockedClient *bc) {
+ RedisModule_Log(ctx,"warning","Blocked client %p disconnected!",
+ (void*)bc);
+}
+
+/* BLOCK.DEBUG <delay_ms> <timeout_ms> -- Block for <delay_ms> milliseconds, then reply with
+ * a random number. Timeout is the command timeout, so that you can test
+ * what happens when the delay is greater than the timeout. */
+int HelloBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ long long delay;
+ long long timeout;
+
+ if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx,"ERR invalid count");
+ }
+
+ if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx,"ERR invalid count");
+ }
+
+ pthread_t tid;
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout);
+
+ /* Here we set a disconnection handler, however since this module will
+ * block in sleep() in a thread, there is not much we can do in the
+ * callback, so this is just to show you the API. */
+ RedisModule_SetDisconnectCallback(bc,HelloBlock_Disconnected);
+
+    /* Now that we have set up a blocking client, we need to pass control
+     * to the thread, along with its arguments: the delay and a reference
+     * to the blocked client handle. */
+ void **targ = RedisModule_Alloc(sizeof(void*)*3);
+ targ[0] = bc;
+ targ[1] = (void*)(unsigned long) delay;
+ // pass 1 as flag to enable time tracking
+ targ[2] = (void*)(unsigned long) 1;
+
+ if (pthread_create(&tid,NULL,BlockDebug_ThreadMain,targ) != 0) {
+ RedisModule_AbortBlock(bc);
+ return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
+ }
+ return REDISMODULE_OK;
+}
+
+/* BLOCK.DEBUG_NOTRACKING <delay_ms> <timeout_ms> -- Block for <delay_ms> milliseconds, then reply with
+ * a random number. Timeout is the command timeout, so that you can test
+ * what happens when the delay is greater than the timeout.
+ * This command does not track background time, so the background time should not appear in stats. */
+int HelloBlockNoTracking_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ long long delay;
+ long long timeout;
+
+ if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx,"ERR invalid count");
+ }
+
+ if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx,"ERR invalid count");
+ }
+
+ pthread_t tid;
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,timeout);
+
+ /* Here we set a disconnection handler, however since this module will
+ * block in sleep() in a thread, there is not much we can do in the
+ * callback, so this is just to show you the API. */
+ RedisModule_SetDisconnectCallback(bc,HelloBlock_Disconnected);
+
+    /* Now that we have set up a blocking client, we need to pass control
+     * to the thread, along with its arguments: the delay and a reference
+     * to the blocked client handle. */
+ void **targ = RedisModule_Alloc(sizeof(void*)*3);
+ targ[0] = bc;
+ targ[1] = (void*)(unsigned long) delay;
+    // pass 0 as flag to disable time tracking
+ targ[2] = (void*)(unsigned long) 0;
+
+ if (pthread_create(&tid,NULL,BlockDebug_ThreadMain,targ) != 0) {
+ RedisModule_AbortBlock(bc);
+ return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
+ }
+ return REDISMODULE_OK;
+}
+
+/* BLOCK.DOUBLE_DEBUG <delay_ms> -- Block for 2 x <delay_ms> milliseconds,
+ * then reply with a random number.
+ * This command is used to test multiple calls to RedisModule_BlockedClientMeasureTimeStart()
+ * and RedisModule_BlockedClientMeasureTimeEnd() within the same execution. */
+int HelloDoubleBlock_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+ long long delay;
+
+ if (RedisModule_StringToLongLong(argv[1],&delay) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx,"ERR invalid count");
+ }
+
+ pthread_t tid;
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,HelloBlock_Reply,HelloBlock_Timeout,HelloBlock_FreeData,0);
+
+    /* Now that we have set up a blocking client, we need to pass control
+     * to the thread, along with its arguments: the delay and a reference
+     * to the blocked client handle. */
+ void **targ = RedisModule_Alloc(sizeof(void*)*2);
+ targ[0] = bc;
+ targ[1] = (void*)(unsigned long) delay;
+
+ if (pthread_create(&tid,NULL,DoubleBlock_ThreadMain,targ) != 0) {
+ RedisModule_AbortBlock(bc);
+ return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
+ }
+ return REDISMODULE_OK;
+}
+
+RedisModuleBlockedClient *blocked_client = NULL;
+
+/* BLOCK.BLOCK [TIMEOUT] -- Blocks the current client until released
+ * or until TIMEOUT seconds elapse. If TIMEOUT is zero, no timeout function
+ * is registered.
+ */
+int Block_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (RedisModule_IsBlockedReplyRequest(ctx)) {
+ RedisModuleString *r = RedisModule_GetBlockedClientPrivateData(ctx);
+ return RedisModule_ReplyWithString(ctx, r);
+ } else if (RedisModule_IsBlockedTimeoutRequest(ctx)) {
+ RedisModule_UnblockClient(blocked_client, NULL); /* Must be called to avoid leaks. */
+ blocked_client = NULL;
+ return RedisModule_ReplyWithSimpleString(ctx, "Timed out");
+ }
+
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+ long long timeout;
+
+ if (RedisModule_StringToLongLong(argv[1], &timeout) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid timeout");
+ }
+ if (blocked_client) {
+ return RedisModule_ReplyWithError(ctx, "ERR another client already blocked");
+ }
+
+    /* Block the client. We use this function as both the reply callback and
+     * the optional timeout callback, and distinguish between the code flows
+     * above. */
+ blocked_client = RedisModule_BlockClient(ctx, Block_RedisCommand,
+ timeout > 0 ? Block_RedisCommand : NULL, HelloBlock_FreeStringData, timeout);
+ return REDISMODULE_OK;
+}
+
+/* BLOCK.IS_BLOCKED -- Returns 1 if we have a blocked client, or 0 otherwise.
+ */
+int IsBlocked_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ RedisModule_ReplyWithLongLong(ctx, blocked_client ? 1 : 0);
+ return REDISMODULE_OK;
+}
+
+/* BLOCK.RELEASE <reply> -- Releases the blocked client and produces the specified reply.
+ */
+int Release_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+ if (!blocked_client) {
+ return RedisModule_ReplyWithError(ctx, "ERR No blocked client");
+ }
+
+ RedisModuleString *replystr = argv[1];
+ RedisModule_RetainString(ctx, replystr);
+ RedisModule_UnblockClient(blocked_client, replystr);
+ blocked_client = NULL;
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if (RedisModule_Init(ctx,"block",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"block.debug",
+ HelloBlock_RedisCommand,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"block.double_debug",
+ HelloDoubleBlock_RedisCommand,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"block.debug_no_track",
+ HelloBlockNoTracking_RedisCommand,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "block.block",
+ Block_RedisCommand, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"block.is_blocked",
+ IsBlocked_RedisCommand,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"block.release",
+ Release_RedisCommand,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/blockonkeys.c b/tests/modules/blockonkeys.c
new file mode 100644
index 0000000..94bb361
--- /dev/null
+++ b/tests/modules/blockonkeys.c
@@ -0,0 +1,645 @@
+#include "redismodule.h"
+
+#include <string.h>
+#include <strings.h>
+#include <assert.h>
+#include <unistd.h>
+
+#define UNUSED(V) ((void) V)
+
+#define LIST_SIZE 1024
+
+/* The FSL (Fixed-Size List) data type is a low-budget imitation of the
+ * native Redis list, in order to test list-like commands implemented
+ * by a module.
+ * Examples: FSL.PUSH, FSL.BPOP, etc. */
+
+typedef struct {
+ long long list[LIST_SIZE];
+ long long length;
+} fsl_t; /* Fixed-size list */
+
+static RedisModuleType *fsltype = NULL;
+
+fsl_t *fsl_type_create(void) {
+ fsl_t *o;
+ o = RedisModule_Alloc(sizeof(*o));
+ o->length = 0;
+ return o;
+}
+
+void fsl_type_free(fsl_t *o) {
+ RedisModule_Free(o);
+}
+
+/* ========================== "fsltype" type methods ======================= */
+
+void *fsl_rdb_load(RedisModuleIO *rdb, int encver) {
+ if (encver != 0) {
+ return NULL;
+ }
+ fsl_t *fsl = fsl_type_create();
+ fsl->length = RedisModule_LoadUnsigned(rdb);
+ for (long long i = 0; i < fsl->length; i++)
+ fsl->list[i] = RedisModule_LoadSigned(rdb);
+ return fsl;
+}
+
+void fsl_rdb_save(RedisModuleIO *rdb, void *value) {
+ fsl_t *fsl = value;
+ RedisModule_SaveUnsigned(rdb,fsl->length);
+ for (long long i = 0; i < fsl->length; i++)
+ RedisModule_SaveSigned(rdb, fsl->list[i]);
+}
+
+void fsl_aofrw(RedisModuleIO *aof, RedisModuleString *key, void *value) {
+ fsl_t *fsl = value;
+ for (long long i = 0; i < fsl->length; i++)
+ RedisModule_EmitAOF(aof, "FSL.PUSH","sl", key, fsl->list[i]);
+}
+
+void fsl_free(void *value) {
+ fsl_type_free(value);
+}
+
+/* ========================== helper methods ======================= */
+
+/* Wrapper around the boilerplate code of opening a key, checking its type, etc.
+ * Returns 0 if `keyname` exists in the dataset but is of the wrong type (i.e. not FSL). */
+int get_fsl(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode, int create, fsl_t **fsl, int reply_on_failure) {
+ *fsl = NULL;
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, keyname, mode);
+
+ if (RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_EMPTY) {
+ /* Key exists */
+ if (RedisModule_ModuleTypeGetType(key) != fsltype) {
+ /* Key is not FSL */
+ RedisModule_CloseKey(key);
+ if (reply_on_failure)
+ RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
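+            /* Count wrong-type accesses in a plain counter key that the
+             * tests can inspect. */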
+ RedisModuleCallReply *reply = RedisModule_Call(ctx, "INCR", "c", "fsl_wrong_type");
+ RedisModule_FreeCallReply(reply);
+ return 0;
+ }
+
+ *fsl = RedisModule_ModuleTypeGetValue(key);
+ if (*fsl && !(*fsl)->length && mode & REDISMODULE_WRITE) {
+ /* Key exists, but it's logically empty */
+ if (create) {
+ create = 0; /* No need to create, key exists in its basic state */
+ } else {
+ RedisModule_DeleteKey(key);
+ *fsl = NULL;
+ }
+ } else {
+ /* Key exists, and has elements in it - no need to create anything */
+ create = 0;
+ }
+ }
+
+ if (create) {
+ *fsl = fsl_type_create();
+ RedisModule_ModuleTypeSetValue(key, fsltype, *fsl);
+ }
+
+ RedisModule_CloseKey(key);
+ return 1;
+}
+
+/* ========================== commands ======================= */
+
+/* FSL.PUSH <key> <int> - Push an integer to the fixed-size list (to the right).
+ * It must be greater than the element in the head of the list. */
+int fsl_push(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3)
+ return RedisModule_WrongArity(ctx);
+
+ long long ele;
+ if (RedisModule_StringToLongLong(argv[2],&ele) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid integer");
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 1, &fsl, 1))
+ return REDISMODULE_OK;
+
+ if (fsl->length == LIST_SIZE)
+ return RedisModule_ReplyWithError(ctx,"ERR list is full");
+
+ if (fsl->length != 0 && fsl->list[fsl->length-1] >= ele)
+ return RedisModule_ReplyWithError(ctx,"ERR new element has to be greater than the head element");
+
+ fsl->list[fsl->length++] = ele;
+ RedisModule_SignalKeyAsReady(ctx, argv[1]);
+
+ RedisModule_ReplicateVerbatim(ctx);
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+typedef struct {
+ RedisModuleString *keyname;
+ long long ele;
+} timer_data_t;
+
+static void timer_callback(RedisModuleCtx *ctx, void *data)
+{
+ timer_data_t *td = data;
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, td->keyname, REDISMODULE_WRITE, 1, &fsl, 1))
+ return;
+
+ if (fsl->length == LIST_SIZE)
+ return; /* list is full */
+
+ if (fsl->length != 0 && fsl->list[fsl->length-1] >= td->ele)
+ return; /* new element has to be greater than the head element */
+
+ fsl->list[fsl->length++] = td->ele;
+ RedisModule_SignalKeyAsReady(ctx, td->keyname);
+
+ RedisModule_Replicate(ctx, "FSL.PUSH", "sl", td->keyname, td->ele);
+
+ RedisModule_FreeString(ctx, td->keyname);
+ RedisModule_Free(td);
+}
+
+/* FSL.PUSHTIMER <key> <int> <period-in-ms> - Push <int> to the fixed-size list (to the right)
+ * after <period-in-ms> milliseconds. It must be greater than the element in the head of the list. */
+int fsl_pushtimer(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 4)
+ return RedisModule_WrongArity(ctx);
+
+ long long ele;
+ if (RedisModule_StringToLongLong(argv[2],&ele) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid integer");
+
+ long long period;
+ if (RedisModule_StringToLongLong(argv[3],&period) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid period");
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 1, &fsl, 1))
+ return REDISMODULE_OK;
+
+ if (fsl->length == LIST_SIZE)
+ return RedisModule_ReplyWithError(ctx,"ERR list is full");
+
+ timer_data_t *td = RedisModule_Alloc(sizeof(*td));
+ td->keyname = argv[1];
+ RedisModule_RetainString(ctx, td->keyname);
+ td->ele = ele;
+
+ RedisModuleTimerID id = RedisModule_CreateTimer(ctx, period, timer_callback, td);
+ RedisModule_ReplyWithLongLong(ctx, id);
+
+ return REDISMODULE_OK;
+}
+
+int bpop_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleString *keyname = RedisModule_GetBlockedClientReadyKey(ctx);
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, keyname, REDISMODULE_WRITE, 0, &fsl, 0) || !fsl)
+ return REDISMODULE_ERR;
+
+ RedisModule_Assert(fsl->length);
+ RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
+
+    /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+int bpop_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
+}
+
+/* FSL.BPOP <key> <timeout> [NO_TO_CB] - Block clients until the list has an element.
+ * When that happens, unblock the client and pop the last element (from the right). */
+int fsl_bpop(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 3)
+ return RedisModule_WrongArity(ctx);
+
+ long long timeout;
+ if (RedisModule_StringToLongLong(argv[2],&timeout) != REDISMODULE_OK || timeout < 0)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid timeout");
+
+ int to_cb = 1;
+ if (argc == 4) {
+ if (strcasecmp("NO_TO_CB", RedisModule_StringPtrLen(argv[3], NULL)))
+ return RedisModule_ReplyWithError(ctx,"ERR invalid argument");
+ to_cb = 0;
+ }
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 0, &fsl, 1))
+ return REDISMODULE_OK;
+
+ if (!fsl) {
+ RedisModule_BlockClientOnKeys(ctx, bpop_reply_callback, to_cb ? bpop_timeout_callback : NULL,
+ NULL, timeout, &argv[1], 1, NULL);
+ } else {
+ RedisModule_Assert(fsl->length);
+ RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
+        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int bpopgt_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleString *keyname = RedisModule_GetBlockedClientReadyKey(ctx);
+ long long *pgt = RedisModule_GetBlockedClientPrivateData(ctx);
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, keyname, REDISMODULE_WRITE, 0, &fsl, 0) || !fsl)
+ return RedisModule_ReplyWithError(ctx,"UNBLOCKED key no longer exists");
+
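+    /* Not big enough yet: returning REDISMODULE_ERR keeps the client
+     * blocked until a qualifying element arrives. */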
+ if (fsl->list[fsl->length-1] <= *pgt)
+ return REDISMODULE_ERR;
+
+ RedisModule_Assert(fsl->length);
+ RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
+    /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+int bpopgt_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
+}
+
+void bpopgt_free_privdata(RedisModuleCtx *ctx, void *privdata) {
+ REDISMODULE_NOT_USED(ctx);
+ RedisModule_Free(privdata);
+}
+
+/* FSL.BPOPGT <key> <gt> <timeout> - Block clients until list has an element greater than <gt>.
+ * When that happens, unblock client and pop the last element (from the right). */
+int fsl_bpopgt(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4)
+ return RedisModule_WrongArity(ctx);
+
+ long long gt;
+ if (RedisModule_StringToLongLong(argv[2],&gt) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid integer");
+
+ long long timeout;
+ if (RedisModule_StringToLongLong(argv[3],&timeout) != REDISMODULE_OK || timeout < 0)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid timeout");
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 0, &fsl, 1))
+ return REDISMODULE_OK;
+
+ if (!fsl)
+ return RedisModule_ReplyWithError(ctx,"ERR key must exist");
+
+ if (fsl->list[fsl->length-1] <= gt) {
+ /* We use malloc so the tests in blockedonkeys.tcl can check for memory leaks */
+ long long *pgt = RedisModule_Alloc(sizeof(long long));
+ *pgt = gt;
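+        /* BLOCK_UNBLOCK_DELETED also fires the reply callback when the key
+         * is deleted, letting it report the UNBLOCKED error above. */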
+ RedisModule_BlockClientOnKeysWithFlags(
+ ctx, bpopgt_reply_callback, bpopgt_timeout_callback,
+ bpopgt_free_privdata, timeout, &argv[1], 1, pgt,
+ REDISMODULE_BLOCK_UNBLOCK_DELETED);
+ } else {
+ RedisModule_Assert(fsl->length);
+ RedisModule_ReplyWithLongLong(ctx, fsl->list[--fsl->length]);
+        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int bpoppush_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleString *src_keyname = RedisModule_GetBlockedClientReadyKey(ctx);
+ RedisModuleString *dst_keyname = RedisModule_GetBlockedClientPrivateData(ctx);
+
+ fsl_t *src;
+ if (!get_fsl(ctx, src_keyname, REDISMODULE_WRITE, 0, &src, 0) || !src)
+ return REDISMODULE_ERR;
+
+ fsl_t *dst;
+ if (!get_fsl(ctx, dst_keyname, REDISMODULE_WRITE, 1, &dst, 0) || !dst)
+ return REDISMODULE_ERR;
+
+ RedisModule_Assert(src->length);
+ long long ele = src->list[--src->length];
+ dst->list[dst->length++] = ele;
+ RedisModule_SignalKeyAsReady(ctx, dst_keyname);
+    /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ return RedisModule_ReplyWithLongLong(ctx, ele);
+}
+
+int bpoppush_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithSimpleString(ctx, "Request timedout");
+}
+
+void bpoppush_free_privdata(RedisModuleCtx *ctx, void *privdata) {
+ RedisModule_FreeString(ctx, privdata);
+}
+
+/* FSL.BPOPPUSH <src> <dst> <timeout> - Block clients until <src> has an element.
+ * When that happens, unblock client, pop the last element from <src> and push it to <dst>
+ * (from the right). */
+int fsl_bpoppush(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4)
+ return RedisModule_WrongArity(ctx);
+
+ long long timeout;
+ if (RedisModule_StringToLongLong(argv[3],&timeout) != REDISMODULE_OK || timeout < 0)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid timeout");
+
+ fsl_t *src;
+ if (!get_fsl(ctx, argv[1], REDISMODULE_WRITE, 0, &src, 1))
+ return REDISMODULE_OK;
+
+ if (!src) {
+ /* Retain string for reply callback */
+ RedisModule_RetainString(ctx, argv[2]);
+ /* Key is empty, we must block */
+ RedisModule_BlockClientOnKeys(ctx, bpoppush_reply_callback, bpoppush_timeout_callback,
+ bpoppush_free_privdata, timeout, &argv[1], 1, argv[2]);
+ } else {
+ fsl_t *dst;
+ if (!get_fsl(ctx, argv[2], REDISMODULE_WRITE, 1, &dst, 1))
+ return REDISMODULE_OK;
+
+ RedisModule_Assert(src->length);
+ long long ele = src->list[--src->length];
+ dst->list[dst->length++] = ele;
+ RedisModule_SignalKeyAsReady(ctx, argv[2]);
+ RedisModule_ReplyWithLongLong(ctx, ele);
+        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ }
+
+ return REDISMODULE_OK;
+}
+
+/* FSL.GETALL <key> - Reply with an array containing all elements. */
+int fsl_getall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ fsl_t *fsl;
+ if (!get_fsl(ctx, argv[1], REDISMODULE_READ, 0, &fsl, 1))
+ return REDISMODULE_OK;
+
+ if (!fsl)
+ return RedisModule_ReplyWithArray(ctx, 0);
+
+ RedisModule_ReplyWithArray(ctx, fsl->length);
+ for (int i = 0; i < fsl->length; i++)
+ RedisModule_ReplyWithLongLong(ctx, fsl->list[i]);
+ return REDISMODULE_OK;
+}
+
+/* Callback for blockonkeys_popall */
+int blockonkeys_popall_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST) {
+ RedisModuleString *elem;
+ long len = 0;
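+        /* The array length isn't known up front, so postpone it and set
+         * the real count after popping everything. */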
+ RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_ARRAY_LEN);
+ while ((elem = RedisModule_ListPop(key, REDISMODULE_LIST_HEAD)) != NULL) {
+ len++;
+ RedisModule_ReplyWithString(ctx, elem);
+ RedisModule_FreeString(ctx, elem);
+ }
+        /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ RedisModule_ReplySetArrayLength(ctx, len);
+ } else {
+ RedisModule_ReplyWithError(ctx, "ERR Not a list");
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int blockonkeys_popall_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithError(ctx, "ERR Timeout");
+}
+
+/* BLOCKONKEYS.POPALL key
+ *
+ * Blocks on an empty key for up to 3 seconds. When unblocked by a list
+ * operation like LPUSH, all the elements are popped and returned. Fails with an
+ * error on timeout. */
+int blockonkeys_popall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_EMPTY) {
+ RedisModule_BlockClientOnKeys(ctx, blockonkeys_popall_reply_callback,
+ blockonkeys_popall_timeout_callback,
+ NULL, 3000, &argv[1], 1, NULL);
+ } else {
+ RedisModule_ReplyWithError(ctx, "ERR Key not empty");
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* BLOCKONKEYS.LPUSH key val [val ..]
+ * BLOCKONKEYS.LPUSH_UNBLOCK key val [val ..]
+ *
+ * A module equivalent of LPUSH. If the name LPUSH_UNBLOCK is used,
+ * RM_SignalKeyAsReady() is also called. */
+int blockonkeys_lpush(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 3)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ if (RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_EMPTY &&
+ RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_LIST) {
+ RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ } else {
+ for (int i = 2; i < argc; i++) {
+ if (RedisModule_ListPush(key, REDISMODULE_LIST_HEAD,
+ argv[i]) != REDISMODULE_OK) {
+ RedisModule_CloseKey(key);
+ return RedisModule_ReplyWithError(ctx, "ERR Push failed");
+ }
+ }
+ }
+ RedisModule_CloseKey(key);
+
+ /* signal key as ready if the command is lpush_unblock */
+ const char *str = RedisModule_StringPtrLen(argv[0], NULL);
+ if (!strcasecmp(str, "blockonkeys.lpush_unblock")) {
+ RedisModule_SignalKeyAsReady(ctx, argv[1]);
+ }
+ RedisModule_ReplicateVerbatim(ctx);
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
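+
+/* For example (hypothetical, assuming another client is blocked on key "k"):
+ * BLOCKONKEYS.LPUSH k v leaves that client blocked, while
+ * BLOCKONKEYS.LPUSH_UNBLOCK k v also calls RedisModule_SignalKeyAsReady() and
+ * therefore lets the blocked client's reply callback run. */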
+
+/* Callback for the BLOCKONKEYS.BLPOPN command */
+int blockonkeys_blpopn_reply_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argc);
+ long long n;
+ RedisModule_StringToLongLong(argv[2], &n);
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ int result;
+ if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST &&
+ RedisModule_ValueLength(key) >= (size_t)n) {
+ RedisModule_ReplyWithArray(ctx, n);
+ for (long i = 0; i < n; i++) {
+ RedisModuleString *elem = RedisModule_ListPop(key, REDISMODULE_LIST_HEAD);
+ RedisModule_ReplyWithString(ctx, elem);
+ RedisModule_FreeString(ctx, elem);
+ }
+ /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ result = REDISMODULE_OK;
+ } else if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_LIST ||
+ RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_EMPTY) {
+ const char *module_cmd = RedisModule_StringPtrLen(argv[0], NULL);
+ if (!strcasecmp(module_cmd, "blockonkeys.blpopn_or_unblock"))
+ RedisModule_UnblockClient(RedisModule_GetBlockedClientHandle(ctx), NULL);
+
+ /* continue blocking */
+ result = REDISMODULE_ERR;
+ } else {
+ result = RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+ RedisModule_CloseKey(key);
+ return result;
+}
+
+int blockonkeys_blpopn_timeout_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithError(ctx, "ERR Timeout");
+}
+
+int blockonkeys_blpopn_abort_callback(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithSimpleString(ctx, "Action aborted");
+}
+
+/* BLOCKONKEYS.BLPOPN key N
+ *
+ * Blocks until key has N elements and then pops them or fails after 3 seconds.
+ */
+int blockonkeys_blpopn(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 3) return RedisModule_WrongArity(ctx);
+
+ long long n, timeout = 3000LL;
+ if (RedisModule_StringToLongLong(argv[2], &n) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "ERR Invalid N");
+ }
+
+ if (argc > 3 ) {
+ if (RedisModule_StringToLongLong(argv[3], &timeout) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "ERR Invalid timeout value");
+ }
+ }
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ int keytype = RedisModule_KeyType(key);
+ if (keytype != REDISMODULE_KEYTYPE_EMPTY &&
+ keytype != REDISMODULE_KEYTYPE_LIST) {
+ RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ } else if (keytype == REDISMODULE_KEYTYPE_LIST &&
+ RedisModule_ValueLength(key) >= (size_t)n) {
+ RedisModule_ReplyWithArray(ctx, n);
+ for (long i = 0; i < n; i++) {
+ RedisModuleString *elem = RedisModule_ListPop(key, REDISMODULE_LIST_HEAD);
+ RedisModule_ReplyWithString(ctx, elem);
+ RedisModule_FreeString(ctx, elem);
+ }
+ /* I'm lazy, so I'll replicate a potentially blocking command; it shouldn't block in this flow. */
+ RedisModule_ReplicateVerbatim(ctx);
+ } else {
+ RedisModule_BlockClientOnKeys(ctx, blockonkeys_blpopn_reply_callback,
+ timeout ? blockonkeys_blpopn_timeout_callback : blockonkeys_blpopn_abort_callback,
+ NULL, timeout, &argv[1], 1, NULL);
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
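+
+/* Sketch of the three paths above (hypothetical values): if "l" already holds
+ * two elements, BLOCKONKEYS.BLPOPN l 2 pops them immediately;
+ * BLOCKONKEYS.BLPOPN l 5 blocks until the list reaches five elements or the
+ * default 3-second timeout replies "ERR Timeout"; with an explicit timeout of
+ * 0 the abort callback is installed instead, so a client unblocked without
+ * data gets "Action aborted". */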
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "blockonkeys", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleTypeMethods tm = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = fsl_rdb_load,
+ .rdb_save = fsl_rdb_save,
+ .aof_rewrite = fsl_aofrw,
+ .mem_usage = NULL,
+ .free = fsl_free,
+ .digest = NULL,
+ };
+
+ fsltype = RedisModule_CreateDataType(ctx, "fsltype_t", 0, &tm);
+ if (fsltype == NULL)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fsl.push",fsl_push,"write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fsl.pushtimer",fsl_pushtimer,"write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fsl.bpop",fsl_bpop,"write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fsl.bpopgt",fsl_bpopgt,"write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fsl.bpoppush",fsl_bpoppush,"write",1,2,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fsl.getall",fsl_getall,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "blockonkeys.popall", blockonkeys_popall,
+ "write", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "blockonkeys.lpush", blockonkeys_lpush,
+ "write", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "blockonkeys.lpush_unblock", blockonkeys_lpush,
+ "write", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "blockonkeys.blpopn", blockonkeys_blpopn,
+ "write", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "blockonkeys.blpopn_or_unblock", blockonkeys_blpopn,
+ "write", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/cmdintrospection.c b/tests/modules/cmdintrospection.c
new file mode 100644
index 0000000..1a5e486
--- /dev/null
+++ b/tests/modules/cmdintrospection.c
@@ -0,0 +1,158 @@
+#include "redismodule.h"
+
+#define UNUSED(V) ((void) V)
+
+int cmd_xadd(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
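+
+/* The registration below attaches metadata that mirrors the built-in XADD, so
+ * a test can presumably compare introspection output (e.g. COMMAND INFO and
+ * COMMAND DOCS for cmdintrospection.xadd) against the vanilla command; see
+ * also the NOTE inside the history array. */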
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "cmdintrospection", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"cmdintrospection.xadd",cmd_xadd,"write deny-oom random fast",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *xadd = RedisModule_GetCommand(ctx,"cmdintrospection.xadd");
+
+ RedisModuleCommandInfo info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .arity = -5,
+ .summary = "Appends a new message to a stream. Creates the key if it doesn't exist.",
+ .since = "5.0.0",
+ .complexity = "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted.",
+ .tips = "nondeterministic_output",
+ .history = (RedisModuleCommandHistoryEntry[]){
+ /* NOTE: All versions specified should be the module's versions, not
+ * Redis'! We use Redis versions in this example for the purpose of
+ * testing (comparing the output with the output of the vanilla
+ * XADD). */
+ {"6.2.0", "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option."},
+ {"7.0.0", "Added support for the `<ms>-*` explicit ID form."},
+ {0}
+ },
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .notes = "UPDATE instead of INSERT because of the optional trimming feature",
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {0}
+ },
+ .args = (RedisModuleCommandArg[]){
+ {
+ .name = "key",
+ .type = REDISMODULE_ARG_TYPE_KEY,
+ .key_spec_index = 0
+ },
+ {
+ .name = "nomkstream",
+ .type = REDISMODULE_ARG_TYPE_PURE_TOKEN,
+ .token = "NOMKSTREAM",
+ .since = "6.2.0",
+ .flags = REDISMODULE_CMD_ARG_OPTIONAL
+ },
+ {
+ .name = "trim",
+ .type = REDISMODULE_ARG_TYPE_BLOCK,
+ .flags = REDISMODULE_CMD_ARG_OPTIONAL,
+ .subargs = (RedisModuleCommandArg[]){
+ {
+ .name = "strategy",
+ .type = REDISMODULE_ARG_TYPE_ONEOF,
+ .subargs = (RedisModuleCommandArg[]){
+ {
+ .name = "maxlen",
+ .type = REDISMODULE_ARG_TYPE_PURE_TOKEN,
+ .token = "MAXLEN",
+ },
+ {
+ .name = "minid",
+ .type = REDISMODULE_ARG_TYPE_PURE_TOKEN,
+ .token = "MINID",
+ .since = "6.2.0",
+ },
+ {0}
+ }
+ },
+ {
+ .name = "operator",
+ .type = REDISMODULE_ARG_TYPE_ONEOF,
+ .flags = REDISMODULE_CMD_ARG_OPTIONAL,
+ .subargs = (RedisModuleCommandArg[]){
+ {
+ .name = "equal",
+ .type = REDISMODULE_ARG_TYPE_PURE_TOKEN,
+ .token = "="
+ },
+ {
+ .name = "approximately",
+ .type = REDISMODULE_ARG_TYPE_PURE_TOKEN,
+ .token = "~"
+ },
+ {0}
+ }
+ },
+ {
+ .name = "threshold",
+ .type = REDISMODULE_ARG_TYPE_STRING,
+ .display_text = "threshold" /* Just for coverage, doesn't have a visible effect */
+ },
+ {
+ .name = "count",
+ .type = REDISMODULE_ARG_TYPE_INTEGER,
+ .token = "LIMIT",
+ .since = "6.2.0",
+ .flags = REDISMODULE_CMD_ARG_OPTIONAL
+ },
+ {0}
+ }
+ },
+ {
+ .name = "id-selector",
+ .type = REDISMODULE_ARG_TYPE_ONEOF,
+ .subargs = (RedisModuleCommandArg[]){
+ {
+ .name = "auto-id",
+ .type = REDISMODULE_ARG_TYPE_PURE_TOKEN,
+ .token = "*"
+ },
+ {
+ .name = "id",
+ .type = REDISMODULE_ARG_TYPE_STRING,
+ },
+ {0}
+ }
+ },
+ {
+ .name = "data",
+ .type = REDISMODULE_ARG_TYPE_BLOCK,
+ .flags = REDISMODULE_CMD_ARG_MULTIPLE,
+ .subargs = (RedisModuleCommandArg[]){
+ {
+ .name = "field",
+ .type = REDISMODULE_ARG_TYPE_STRING,
+ },
+ {
+ .name = "value",
+ .type = REDISMODULE_ARG_TYPE_STRING,
+ },
+ {0}
+ }
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(xadd, &info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/commandfilter.c b/tests/modules/commandfilter.c
new file mode 100644
index 0000000..56e517a
--- /dev/null
+++ b/tests/modules/commandfilter.c
@@ -0,0 +1,251 @@
+#include "redismodule.h"
+
+#include <string.h>
+#include <strings.h>
+
+static RedisModuleString *log_key_name;
+
+static const char log_command_name[] = "commandfilter.log";
+static const char ping_command_name[] = "commandfilter.ping";
+static const char retained_command_name[] = "commandfilter.retained";
+static const char unregister_command_name[] = "commandfilter.unregister";
+static const char unfiltered_clientid_name[] = "unfilter_clientid";
+static int in_log_command = 0;
+
+unsigned long long unfiltered_clientid = 0;
+
+static RedisModuleCommandFilter *filter, *filter1;
+static RedisModuleString *retained;
+
+int CommandFilter_UnregisterCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ (void) argc;
+ (void) argv;
+
+ RedisModule_ReplyWithLongLong(ctx,
+ RedisModule_UnregisterCommandFilter(ctx, filter));
+
+ return REDISMODULE_OK;
+}
+
+int CommandFilter_PingCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ (void) argc;
+ (void) argv;
+
+ RedisModuleCallReply *reply = RedisModule_Call(ctx, "ping", "c", "@log");
+ if (reply) {
+ RedisModule_ReplyWithCallReply(ctx, reply);
+ RedisModule_FreeCallReply(reply);
+ } else {
+ RedisModule_ReplyWithSimpleString(ctx, "Unknown command or invalid arguments");
+ }
+
+ return REDISMODULE_OK;
+}
+
+int CommandFilter_Retained(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ (void) argc;
+ (void) argv;
+
+ if (retained) {
+ RedisModule_ReplyWithString(ctx, retained);
+ } else {
+ RedisModule_ReplyWithNull(ctx);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int CommandFilter_LogCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ RedisModuleString *s = RedisModule_CreateString(ctx, "", 0);
+
+ int i;
+ for (i = 1; i < argc; i++) {
+ size_t arglen;
+ const char *arg = RedisModule_StringPtrLen(argv[i], &arglen);
+
+ if (i > 1) RedisModule_StringAppendBuffer(ctx, s, " ", 1);
+ RedisModule_StringAppendBuffer(ctx, s, arg, arglen);
+ }
+
+ RedisModuleKey *log = RedisModule_OpenKey(ctx, log_key_name, REDISMODULE_WRITE|REDISMODULE_READ);
+ RedisModule_ListPush(log, REDISMODULE_LIST_HEAD, s);
+ RedisModule_CloseKey(log);
+ RedisModule_FreeString(ctx, s);
+
+ in_log_command = 1;
+
+ size_t cmdlen;
+ const char *cmdname = RedisModule_StringPtrLen(argv[1], &cmdlen);
+ RedisModuleCallReply *reply = RedisModule_Call(ctx, cmdname, "v", &argv[2], argc - 2);
+ if (reply) {
+ RedisModule_ReplyWithCallReply(ctx, reply);
+ RedisModule_FreeCallReply(reply);
+ } else {
+ RedisModule_ReplyWithSimpleString(ctx, "Unknown command or invalid arguments");
+ }
+
+ in_log_command = 0;
+
+ return REDISMODULE_OK;
+}
+
+int CommandFilter_UnfilteredClientId(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc < 2)
+ return RedisModule_WrongArity(ctx);
+
+ long long id;
+ if (RedisModule_StringToLongLong(argv[1], &id) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "invalid client id");
+ return REDISMODULE_OK;
+ }
+ if (id < 0) {
+ RedisModule_ReplyWithError(ctx, "invalid client id");
+ return REDISMODULE_OK;
+ }
+
+ unfiltered_clientid = id;
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/* Filter that guards against Bug #11894 reappearing.
+ *
+ * It ensures the filter is only run on the first pass through, and not when
+ * the blocked command is reprocessed.
+ */
+void CommandFilter_BlmoveSwap(RedisModuleCommandFilterCtx *filter)
+{
+ if (RedisModule_CommandFilterArgsCount(filter) != 6)
+ return;
+
+ RedisModuleString *arg = RedisModule_CommandFilterArgGet(filter, 0);
+ size_t arg_len;
+ const char *arg_str = RedisModule_StringPtrLen(arg, &arg_len);
+
+ if (arg_len != 6 || strncmp(arg_str, "blmove", 6))
+ return;
+
+ /*
+ * Swap the directional args (right/left) of source and destination.
+ * We need to hold the strings here; we can't pass the ArgGet results
+ * straight into ArgReplace, as replacing one arg would free the other
+ * and cause a use-after-free.
+ */
+ RedisModuleString *dir1 = RedisModule_HoldString(NULL, RedisModule_CommandFilterArgGet(filter, 3));
+ RedisModuleString *dir2 = RedisModule_HoldString(NULL, RedisModule_CommandFilterArgGet(filter, 4));
+ RedisModule_CommandFilterArgReplace(filter, 3, dir2);
+ RedisModule_CommandFilterArgReplace(filter, 4, dir1);
+}
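+
+/* Illustration of the rewrite above (hypothetical key names): the client
+ * command "BLMOVE src dst LEFT RIGHT 0" reaches the server as
+ * "BLMOVE src dst RIGHT LEFT 0". If Redis wrongly re-ran filters when the
+ * blocked command is reprocessed, the args would be swapped back again and
+ * the regression would become observable. */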
+
+void CommandFilter_CommandFilter(RedisModuleCommandFilterCtx *filter)
+{
+ unsigned long long id = RedisModule_CommandFilterGetClientId(filter);
+ if (id == unfiltered_clientid) return;
+
+ if (in_log_command) return; /* don't process our own RM_Call() from CommandFilter_LogCommand() */
+
+ /* Fun manipulations:
+ * - Remove @delme
+ * - Replace @replaceme
+ * - Append @insertbefore or @insertafter
+ * - Prefix with Log command if @log encountered
+ */
+ int log = 0;
+ int pos = 0;
+ while (pos < RedisModule_CommandFilterArgsCount(filter)) {
+ const RedisModuleString *arg = RedisModule_CommandFilterArgGet(filter, pos);
+ size_t arg_len;
+ const char *arg_str = RedisModule_StringPtrLen(arg, &arg_len);
+
+ if (arg_len == 6 && !memcmp(arg_str, "@delme", 6)) {
+ RedisModule_CommandFilterArgDelete(filter, pos);
+ continue;
+ }
+ if (arg_len == 10 && !memcmp(arg_str, "@replaceme", 10)) {
+ RedisModule_CommandFilterArgReplace(filter, pos,
+ RedisModule_CreateString(NULL, "--replaced--", 12));
+ } else if (arg_len == 13 && !memcmp(arg_str, "@insertbefore", 13)) {
+ RedisModule_CommandFilterArgInsert(filter, pos,
+ RedisModule_CreateString(NULL, "--inserted-before--", 19));
+ pos++;
+ } else if (arg_len == 12 && !memcmp(arg_str, "@insertafter", 12)) {
+ RedisModule_CommandFilterArgInsert(filter, pos + 1,
+ RedisModule_CreateString(NULL, "--inserted-after--", 18));
+ pos++;
+ } else if (arg_len == 7 && !memcmp(arg_str, "@retain", 7)) {
+ if (retained) RedisModule_FreeString(NULL, retained);
+ retained = RedisModule_CommandFilterArgGet(filter, pos + 1);
+ RedisModule_RetainString(NULL, retained);
+ pos++;
+ } else if (arg_len == 4 && !memcmp(arg_str, "@log", 4)) {
+ log = 1;
+ }
+ pos++;
+ }
+
+ if (log) RedisModule_CommandFilterArgInsert(filter, 0,
+ RedisModule_CreateString(NULL, log_command_name, sizeof(log_command_name)-1));
+}
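+
+/* A worked example of the manipulations above (hypothetical arguments):
+ *
+ *   SET foo @delme @replaceme @insertafter @log
+ *
+ * is rewritten into
+ *
+ *   commandfilter.log SET foo --replaced-- @insertafter --inserted-after-- @log
+ */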
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (RedisModule_Init(ctx,"commandfilter",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (argc != 2 && argc != 3) {
+ RedisModule_Log(ctx, "warning", "Log key name not specified");
+ return REDISMODULE_ERR;
+ }
+
+ long long noself = 0;
+ log_key_name = RedisModule_CreateStringFromString(ctx, argv[0]);
+ RedisModule_StringToLongLong(argv[1], &noself);
+ retained = NULL;
+
+ if (RedisModule_CreateCommand(ctx,log_command_name,
+ CommandFilter_LogCommand,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,ping_command_name,
+ CommandFilter_PingCommand,"deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,retained_command_name,
+ CommandFilter_Retained,"readonly",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,unregister_command_name,
+ CommandFilter_UnregisterCommand,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, unfiltered_clientid_name,
+ CommandFilter_UnfilteredClientId, "admin", 1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if ((filter = RedisModule_RegisterCommandFilter(ctx, CommandFilter_CommandFilter,
+ noself ? REDISMODULE_CMDFILTER_NOSELF : 0))
+ == NULL) return REDISMODULE_ERR;
+
+ if ((filter1 = RedisModule_RegisterCommandFilter(ctx, CommandFilter_BlmoveSwap, 0)) == NULL)
+ return REDISMODULE_ERR;
+
+ if (argc == 3) {
+ const char *ptr = RedisModule_StringPtrLen(argv[2], NULL);
+ if (!strcasecmp(ptr, "noload")) {
+ /* The "noload" argument is a hint that we should return ERR at the last moment of OnLoad. */
+ RedisModule_FreeString(ctx, log_key_name);
+ if (retained) RedisModule_FreeString(NULL, retained);
+ return REDISMODULE_ERR;
+ }
+ }
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ RedisModule_FreeString(ctx, log_key_name);
+ if (retained) RedisModule_FreeString(NULL, retained);
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/datatype.c b/tests/modules/datatype.c
new file mode 100644
index 0000000..408d1a5
--- /dev/null
+++ b/tests/modules/datatype.c
@@ -0,0 +1,314 @@
+/* This module currently tests a small subset of functionality, but should be
+ * extended in the future for general ModuleDataType coverage.
+ */
+
+/* Define feature-test macros so that usleep() is available. */
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE
+#include <unistd.h>
+
+#include "redismodule.h"
+
+static RedisModuleType *datatype = NULL;
+static int load_encver = 0;
+
+/* used to test processing events during slow loading */
+static volatile int slow_loading = 0;
+static volatile int is_in_slow_loading = 0;
+
+#define DATATYPE_ENC_VER 1
+
+typedef struct {
+ long long intval;
+ RedisModuleString *strval;
+} DataType;
+
+static void *datatype_load(RedisModuleIO *io, int encver) {
+ load_encver = encver;
+ int intval = RedisModule_LoadSigned(io);
+ if (RedisModule_IsIOError(io)) return NULL;
+
+ RedisModuleString *strval = RedisModule_LoadString(io);
+ if (RedisModule_IsIOError(io)) return NULL;
+
+ DataType *dt = (DataType *) RedisModule_Alloc(sizeof(DataType));
+ dt->intval = intval;
+ dt->strval = strval;
+
+ if (slow_loading) {
+ RedisModuleCtx *ctx = RedisModule_GetContextFromIO(io);
+ is_in_slow_loading = 1;
+ while (slow_loading) {
+ RedisModule_Yield(ctx, REDISMODULE_YIELD_FLAG_CLIENTS, "Slow module operation");
+ usleep(1000);
+ }
+ is_in_slow_loading = 0;
+ }
+
+ return dt;
+}
+
+static void datatype_save(RedisModuleIO *io, void *value) {
+ DataType *dt = (DataType *) value;
+ RedisModule_SaveSigned(io, dt->intval);
+ RedisModule_SaveString(io, dt->strval);
+}
+
+static void datatype_free(void *value) {
+ if (value) {
+ DataType *dt = (DataType *) value;
+
+ if (dt->strval) RedisModule_FreeString(NULL, dt->strval);
+ RedisModule_Free(dt);
+ }
+}
+
+static void *datatype_copy(RedisModuleString *fromkey, RedisModuleString *tokey, const void *value) {
+ const DataType *old = value;
+
+ /* Answers to ultimate questions cannot be copied! */
+ if (old->intval == 42)
+ return NULL;
+
+ DataType *new = (DataType *) RedisModule_Alloc(sizeof(DataType));
+
+ new->intval = old->intval;
+ new->strval = RedisModule_CreateStringFromString(NULL, old->strval);
+
+ /* Breaking the rules here! We return a copy that also includes traces
+ * of fromkey/tokey to confirm we get what we expect.
+ */
+ size_t len;
+ const char *str = RedisModule_StringPtrLen(fromkey, &len);
+ RedisModule_StringAppendBuffer(NULL, new->strval, "/", 1);
+ RedisModule_StringAppendBuffer(NULL, new->strval, str, len);
+ RedisModule_StringAppendBuffer(NULL, new->strval, "/", 1);
+ str = RedisModule_StringPtrLen(tokey, &len);
+ RedisModule_StringAppendBuffer(NULL, new->strval, str, len);
+
+ return new;
+}
+
+static int datatype_set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long intval;
+
+ if (RedisModule_StringToLongLong(argv[2], &intval) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid integer value");
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ DataType *dt = RedisModule_Calloc(sizeof(DataType), 1);
+ dt->intval = intval;
+ dt->strval = argv[3];
+ RedisModule_RetainString(ctx, dt->strval);
+
+ RedisModule_ModuleTypeSetValue(key, datatype, dt);
+ RedisModule_CloseKey(key);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+ return REDISMODULE_OK;
+}
+
+static int datatype_restore(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long encver;
+ if (RedisModule_StringToLongLong(argv[3], &encver) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid integer value");
+ return REDISMODULE_OK;
+ }
+
+ DataType *dt = RedisModule_LoadDataTypeFromStringEncver(argv[2], datatype, encver);
+ if (!dt) {
+ RedisModule_ReplyWithError(ctx, "Invalid data");
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ RedisModule_ModuleTypeSetValue(key, datatype, dt);
+ RedisModule_CloseKey(key);
+ RedisModule_ReplyWithLongLong(ctx, load_encver);
+
+ return REDISMODULE_OK;
+}
+
+static int datatype_get(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ DataType *dt = RedisModule_ModuleTypeGetValue(key);
+ RedisModule_CloseKey(key);
+
+ if (!dt) {
+ RedisModule_ReplyWithNullArray(ctx);
+ } else {
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithLongLong(ctx, dt->intval);
+ RedisModule_ReplyWithString(ctx, dt->strval);
+ }
+ return REDISMODULE_OK;
+}
+
+static int datatype_dump(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ DataType *dt = RedisModule_ModuleTypeGetValue(key);
+ RedisModule_CloseKey(key);
+
+ RedisModuleString *reply = RedisModule_SaveDataTypeToString(ctx, dt, datatype);
+ if (!reply) {
+ RedisModule_ReplyWithError(ctx, "Failed to save");
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithString(ctx, reply);
+ RedisModule_FreeString(ctx, reply);
+ return REDISMODULE_OK;
+}
+
+static int datatype_swap(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *a = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ RedisModuleKey *b = RedisModule_OpenKey(ctx, argv[2], REDISMODULE_WRITE);
+ void *val = RedisModule_ModuleTypeGetValue(a);
+
+ int error = (RedisModule_ModuleTypeReplaceValue(b, datatype, val, &val) == REDISMODULE_ERR ||
+ RedisModule_ModuleTypeReplaceValue(a, datatype, val, NULL) == REDISMODULE_ERR);
+ if (!error)
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ else
+ RedisModule_ReplyWithError(ctx, "ERR failed");
+
+ RedisModule_CloseKey(a);
+ RedisModule_CloseKey(b);
+
+ return REDISMODULE_OK;
+}
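+
+/* Note on the swap above: the first RedisModule_ModuleTypeReplaceValue() call
+ * stores a's value into b and captures b's previous value back into val; the
+ * second call installs that previous value into a, completing the exchange. */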
+
+/* used to enable or disable slow loading */
+static int datatype_slow_loading(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long ll;
+ if (RedisModule_StringToLongLong(argv[1], &ll) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid integer value");
+ return REDISMODULE_OK;
+ }
+ slow_loading = ll;
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/* used to test if we reached the slow loading code */
+static int datatype_is_in_slow_loading(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithLongLong(ctx, is_in_slow_loading);
+ return REDISMODULE_OK;
+}
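+
+/* A plausible flow for the two commands above: enable the busy loop with
+ * DATATYPE.SLOW_LOADING 1, trigger a reload (e.g. DEBUG RELOAD) so that
+ * datatype_load() spins while yielding, check from another connection that
+ * DATATYPE.IS_IN_SLOW_LOADING returns 1 (RedisModule_Yield() with the CLIENTS
+ * flag lets that command run during loading), then release the loop with
+ * DATATYPE.SLOW_LOADING 0. */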
+
+int createDataTypeBlockCheck(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ static RedisModuleType *datatype_outside_onload = NULL;
+
+ RedisModuleTypeMethods datatype_methods = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = datatype_load,
+ .rdb_save = datatype_save,
+ .free = datatype_free,
+ .copy = datatype_copy
+ };
+
+ datatype_outside_onload = RedisModule_CreateDataType(ctx, "test_dt_outside_onload", 1, &datatype_methods);
+
+ /* This validates that it's not possible to create a datatype outside
+ * OnLoad, so we reply with an error if creation unexpectedly succeeds. */
+ if (datatype_outside_onload == NULL) {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ } else {
+ RedisModule_ReplyWithError(ctx, "UNEXPECTEDOK");
+ }
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"datatype",DATATYPE_ENC_VER,REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ /* Create a command that tries to create a datatype outside the OnLoad() function. */
+ if (RedisModule_CreateCommand(ctx,"block.create.datatype.outside.onload", createDataTypeBlockCheck, "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModule_SetModuleOptions(ctx, REDISMODULE_OPTIONS_HANDLE_IO_ERRORS);
+
+ RedisModuleTypeMethods datatype_methods = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = datatype_load,
+ .rdb_save = datatype_save,
+ .free = datatype_free,
+ .copy = datatype_copy
+ };
+
+ datatype = RedisModule_CreateDataType(ctx, "test___dt", 1, &datatype_methods);
+ if (datatype == NULL)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"datatype.set", datatype_set,
+ "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"datatype.get", datatype_get,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"datatype.restore", datatype_restore,
+ "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"datatype.dump", datatype_dump,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "datatype.swap", datatype_swap,
+ "write", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "datatype.slow_loading", datatype_slow_loading,
+ "allow-loading", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "datatype.is_in_slow_loading", datatype_is_in_slow_loading,
+ "allow-loading", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/datatype2.c b/tests/modules/datatype2.c
new file mode 100644
index 0000000..bc0dc3d
--- /dev/null
+++ b/tests/modules/datatype2.c
@@ -0,0 +1,739 @@
+/* This module is used to test a use case of a module that stores information
+ * about keys in global memory, and relies on the enhanced data type callbacks
+ * to get the key name and dbid on various operations.
+ *
+ * It simulates a simple memory allocator. The smallest allocation unit of the
+ * allocator is a mem block with a size of 4KB. Multiple mem blocks are chained
+ * into a linked list. These linked lists are placed in a global dict named
+ * 'mem_pool', and each db has its own 'mem_pool'. Use the 'mem.alloc' command
+ * to allocate a specified number of mem blocks and 'mem.free' to release the
+ * memory. Use 'mem.write' and 'mem.read' to write and read a specified mem
+ * block (note that each mem block can only be written once). Use 'mem.usage'
+ * to get the memory usage of a given db; it returns the total number of mem
+ * blocks and the number of used mem blocks in that db.
+ * The specific structure diagram is as follows:
+ *
+ *
+ * Global variables of the module:
+ *
+ * mem blocks link
+ * ┌─────┬─────┐
+ * │ │ │ ┌───┐ ┌───┐ ┌───┐
+ * │ k1 │ ───┼───►│4KB├───►│4KB├───►│4KB│
+ * │ │ │ └───┘ └───┘ └───┘
+ * ├─────┼─────┤
+ * ┌───────┐ ┌────► │ │ │ ┌───┐ ┌───┐
+ * │ │ │ │ k2 │ ───┼───►│4KB├───►│4KB│
+ * │ db0 ├──────┘ │ │ │ └───┘ └───┘
+ * │ │ ├─────┼─────┤
+ * ├───────┤ │ │ │ ┌───┐ ┌───┐ ┌───┐
+ * │ │ │ k3 │ ───┼───►│4KB├───►│4KB├───►│4KB│
+ * │ db1 ├──►null │ │ │ └───┘ └───┘ └───┘
+ * │ │ └─────┴─────┘
+ * ├───────┤ dict
+ * │ │
+ * │ db2 ├─────────┐
+ * │ │ │
+ * ├───────┤ │ ┌─────┬─────┐
+ * │ │ │ │ │ │ ┌───┐ ┌───┐ ┌───┐
+ * │ db3 ├──►null │ │ k1 │ ───┼───►│4KB├───►│4KB├───►│4KB│
+ * │ │ │ │ │ │ └───┘ └───┘ └───┘
+ * └───────┘ │ ├─────┼─────┤
+ * mem_pool[MAX_DB] │ │ │ │ ┌───┐ ┌───┐
+ * └──►│ k2 │ ───┼───►│4KB├───►│4KB│
+ * │ │ │ └───┘ └───┘
+ * └─────┴─────┘
+ * dict
+ *
+ *
+ * Keys in redis database:
+ *
+ * ┌───────┐
+ * │ size │
+ * ┌───────────►│ used │
+ * │ │ mask │
+ * ┌─────┬─────┐ │ └───────┘ ┌───────┐
+ * │ │ │ │ MemAllocObject │ size │
+ * │ k1 │ ───┼─┘ ┌───────────►│ used │
+ * │ │ │ │ │ mask │
+ * ├─────┼─────┤ ┌───────┐ ┌─────┬─────┐ │ └───────┘
+ * │ │ │ │ size │ │ │ │ │ MemAllocObject
+ * │ k2 │ ───┼─────────────►│ used │ │ k1 │ ───┼─┘
+ * │ │ │ │ mask │ │ │ │
+ * ├─────┼─────┤ └───────┘ ├─────┼─────┤
+ * │ │ │ MemAllocObject │ │ │
+ * │ k3 │ ───┼─┐ │ k2 │ ───┼─┐
+ * │ │ │ │ │ │ │ │
+ * └─────┴─────┘ │ ┌───────┐ └─────┴─────┘ │ ┌───────┐
+ * redis db[0] │ │ size │ redis db[1] │ │ size │
+ * └───────────►│ used │ └───────────►│ used │
+ * │ mask │ │ mask │
+ * └───────┘ └───────┘
+ * MemAllocObject MemAllocObject
+ *
+ **/
+
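+/* A hypothetical session against this module (key names and values are
+ * arbitrary):
+ *
+ *   redis> MEM.ALLOC k1 3          (allocate 3 blocks in the current db)
+ *   (integer) 3
+ *   redis> MEM.WRITE k1 0 hello    (write block 0; blocks are write-once)
+ *   (integer) 5
+ *   redis> MEM.READ k1 0
+ *   "hello"
+ *   redis> MEM.USAGE 0
+ *   1) "total" 2) (integer) 3 3) "used" 4) (integer) 1
+ */
+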
+#include "redismodule.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdint.h>
+
+static RedisModuleType *MemAllocType;
+
+#define MAX_DB 16
+RedisModuleDict *mem_pool[MAX_DB];
+typedef struct MemAllocObject {
+ long long size;
+ long long used;
+ uint64_t mask;
+} MemAllocObject;
+
+MemAllocObject *createMemAllocObject(void) {
+ MemAllocObject *o = RedisModule_Calloc(1, sizeof(*o));
+ return o;
+}
+
+/*---------------------------- mem block apis ------------------------------------*/
+#define BLOCK_SIZE 4096
+struct MemBlock {
+ char block[BLOCK_SIZE];
+ struct MemBlock *next;
+};
+
+void MemBlockFree(struct MemBlock *head) {
+ if (head) {
+ struct MemBlock *block = head->next, *next;
+ RedisModule_Free(head);
+ while (block) {
+ next = block->next;
+ RedisModule_Free(block);
+ block = next;
+ }
+ }
+}
+struct MemBlock *MemBlockCreate(long long num) {
+ if (num <= 0) {
+ return NULL;
+ }
+
+ struct MemBlock *head = RedisModule_Calloc(1, sizeof(struct MemBlock));
+ struct MemBlock *block = head;
+ while (--num) {
+ block->next = RedisModule_Calloc(1, sizeof(struct MemBlock));
+ block = block->next;
+ }
+
+ return head;
+}
+
+long long MemBlockNum(const struct MemBlock *head) {
+ long long num = 0;
+ const struct MemBlock *block = head;
+ while (block) {
+ num++;
+ block = block->next;
+ }
+
+ return num;
+}
+
+size_t MemBlockWrite(struct MemBlock *head, long long block_index, const char *data, size_t size) {
+ size_t w_size = 0;
+ struct MemBlock *block = head;
+ while (block_index-- && block) {
+ block = block->next;
+ }
+
+ if (block) {
+ size = size > BLOCK_SIZE ? BLOCK_SIZE:size;
+ memcpy(block->block, data, size);
+ w_size += size;
+ }
+
+ return w_size;
+}
+
+size_t MemBlockRead(struct MemBlock *head, long long block_index, char *data, size_t size) {
+ size_t r_size = 0;
+ struct MemBlock *block = head;
+ while (block_index-- && block) {
+ block = block->next;
+ }
+
+ if (block) {
+ size = size > BLOCK_SIZE ? BLOCK_SIZE:size;
+ memcpy(data, block->block, size);
+ r_size += size;
+ }
+
+ return r_size;
+}
+
+void MemPoolFreeDb(RedisModuleCtx *ctx, int dbid) {
+ RedisModuleString *key;
+ void *tdata;
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(mem_pool[dbid], "^", NULL, 0);
+ while((key = RedisModule_DictNext(ctx, iter, &tdata)) != NULL) {
+ MemBlockFree((struct MemBlock *)tdata);
+ }
+ RedisModule_DictIteratorStop(iter);
+ RedisModule_FreeDict(NULL, mem_pool[dbid]);
+ mem_pool[dbid] = RedisModule_CreateDict(NULL);
+}
+
+struct MemBlock *MemBlockClone(const struct MemBlock *head) {
+ struct MemBlock *newhead = NULL;
+ if (head) {
+ newhead = RedisModule_Calloc(1, sizeof(struct MemBlock));
+ memcpy(newhead->block, head->block, BLOCK_SIZE);
+ struct MemBlock *newblock = newhead;
+ const struct MemBlock *oldblock = head->next;
+ while (oldblock) {
+ newblock->next = RedisModule_Calloc(1, sizeof(struct MemBlock));
+ newblock = newblock->next;
+ memcpy(newblock->block, oldblock->block, BLOCK_SIZE);
+ oldblock = oldblock->next;
+ }
+ }
+
+ return newhead;
+}
+
+/*---------------------------- event handler ------------------------------------*/
+void swapDbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(sub);
+
+ RedisModuleSwapDbInfo *ei = data;
+
+ // Mirror SWAPDB by swapping the module's per-db mem pools.
+ RedisModuleDict *tmp = mem_pool[ei->dbnum_first];
+ mem_pool[ei->dbnum_first] = mem_pool[ei->dbnum_second];
+ mem_pool[ei->dbnum_second] = tmp;
+}
+
+void flushdbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(e);
+ int i;
+ RedisModuleFlushInfo *fi = data;
+
+ RedisModule_AutoMemory(ctx);
+
+ if (sub == REDISMODULE_SUBEVENT_FLUSHDB_START) {
+ if (fi->dbnum != -1) {
+ MemPoolFreeDb(ctx, fi->dbnum);
+ } else {
+ for (i = 0; i < MAX_DB; i++) {
+ MemPoolFreeDb(ctx, i);
+ }
+ }
+ }
+}
+
+/*---------------------------- command implementation ------------------------------------*/
+
+/* MEM.ALLOC key block_num */
+int MemAlloc_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+
+ if (argc != 3) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ long long block_num;
+ if ((RedisModule_StringToLongLong(argv[2], &block_num) != REDISMODULE_OK) || block_num <= 0) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid block_num: must be a value greater than 0");
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ | REDISMODULE_WRITE);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY && RedisModule_ModuleTypeGetType(key) != MemAllocType) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ MemAllocObject *o;
+ if (type == REDISMODULE_KEYTYPE_EMPTY) {
+ o = createMemAllocObject();
+ RedisModule_ModuleTypeSetValue(key, MemAllocType, o);
+ } else {
+ o = RedisModule_ModuleTypeGetValue(key);
+ }
+
+ struct MemBlock *mem = MemBlockCreate(block_num);
+ RedisModule_Assert(mem != NULL);
+ RedisModule_DictSet(mem_pool[RedisModule_GetSelectedDb(ctx)], argv[1], mem);
+ o->size = block_num;
+ o->used = 0;
+ o->mask = 0;
+
+ RedisModule_ReplyWithLongLong(ctx, block_num);
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+/* MEM.FREE key */
+int MemFree_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+
+ if (argc != 2) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY && RedisModule_ModuleTypeGetType(key) != MemAllocType) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ int ret = 0;
+ MemAllocObject *o;
+ if (type == REDISMODULE_KEYTYPE_EMPTY) {
+ RedisModule_ReplyWithLongLong(ctx, ret);
+ return REDISMODULE_OK;
+ } else {
+ o = RedisModule_ModuleTypeGetValue(key);
+ }
+
+ int nokey;
+ struct MemBlock *mem = (struct MemBlock *)RedisModule_DictGet(mem_pool[RedisModule_GetSelectedDb(ctx)], argv[1], &nokey);
+ if (!nokey && mem) {
+ RedisModule_DictDel(mem_pool[RedisModule_GetSelectedDb(ctx)], argv[1], NULL);
+ MemBlockFree(mem);
+ o->used = 0;
+ o->size = 0;
+ o->mask = 0;
+ ret = 1;
+ }
+
+ RedisModule_ReplyWithLongLong(ctx, ret);
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+/* MEM.WRITE key block_index data */
+int MemWrite_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+
+ if (argc != 4) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ long long block_index;
+ if ((RedisModule_StringToLongLong(argv[2], &block_index) != REDISMODULE_OK) || block_index < 0) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid block_index: must be a value greater than 0");
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ | REDISMODULE_WRITE);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY && RedisModule_ModuleTypeGetType(key) != MemAllocType) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ MemAllocObject *o;
+ if (type == REDISMODULE_KEYTYPE_EMPTY) {
+ return RedisModule_ReplyWithError(ctx, "ERR Memory has not been allocated");
+ } else {
+ o = RedisModule_ModuleTypeGetValue(key);
+ }
+
+ if (o->mask & (1UL << block_index)) {
+ return RedisModule_ReplyWithError(ctx, "ERR block is busy");
+ }
+
+ int ret = 0;
+ int nokey;
+ struct MemBlock *mem = (struct MemBlock *)RedisModule_DictGet(mem_pool[RedisModule_GetSelectedDb(ctx)], argv[1], &nokey);
+ if (!nokey && mem) {
+ size_t len;
+ const char *buf = RedisModule_StringPtrLen(argv[3], &len);
+ ret = MemBlockWrite(mem, block_index, buf, len);
+ o->mask |= (1UL << block_index);
+ o->used++;
+ }
+
+ RedisModule_ReplyWithLongLong(ctx, ret);
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+/* MEM.READ key block_index */
+int MemRead_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+
+ if (argc != 3) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ long long block_index;
+ if ((RedisModule_StringToLongLong(argv[2], &block_index) != REDISMODULE_OK) || block_index < 0) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid block_index: must be a value greater than 0");
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY && RedisModule_ModuleTypeGetType(key) != MemAllocType) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ MemAllocObject *o;
+ if (type == REDISMODULE_KEYTYPE_EMPTY) {
+ return RedisModule_ReplyWithError(ctx, "ERR Memory has not been allocated");
+ } else {
+ o = RedisModule_ModuleTypeGetValue(key);
+ }
+
+ if (!(o->mask & (1UL << block_index))) {
+ return RedisModule_ReplyWithNull(ctx);
+ }
+
+ int nokey;
+ struct MemBlock *mem = (struct MemBlock *)RedisModule_DictGet(mem_pool[RedisModule_GetSelectedDb(ctx)], argv[1], &nokey);
+ RedisModule_Assert(nokey == 0 && mem != NULL);
+
+ char buf[BLOCK_SIZE];
+ MemBlockRead(mem, block_index, buf, sizeof(buf));
+
+ /* Assuming that the contents are all C-style (NUL-terminated) strings */
+ RedisModule_ReplyWithStringBuffer(ctx, buf, strlen(buf));
+ return REDISMODULE_OK;
+}
+
+/* MEM.USAGE dbid */
+int MemUsage_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+
+ if (argc != 2) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ long long dbid;
+ if (RedisModule_StringToLongLong(argv[1], &dbid) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid value: must be an integer");
+ }
+
+ if (dbid < 0 || dbid >= MAX_DB) {
+ return RedisModule_ReplyWithError(ctx, "ERR dbid out of range");
+ }
+
+ long long size = 0, used = 0;
+
+ void *data;
+ RedisModuleString *key;
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(mem_pool[dbid], "^", NULL, 0);
+ while((key = RedisModule_DictNext(ctx, iter, &data)) != NULL) {
+ int dbbackup = RedisModule_GetSelectedDb(ctx);
+ RedisModule_SelectDb(ctx, dbid);
+ RedisModuleKey *openkey = RedisModule_OpenKey(ctx, key, REDISMODULE_READ);
+ int type = RedisModule_KeyType(openkey);
+ RedisModule_Assert(type != REDISMODULE_KEYTYPE_EMPTY && RedisModule_ModuleTypeGetType(openkey) == MemAllocType);
+ MemAllocObject *o = RedisModule_ModuleTypeGetValue(openkey);
+ used += o->used;
+ size += o->size;
+ RedisModule_CloseKey(openkey);
+ RedisModule_SelectDb(ctx, dbbackup);
+ }
+ RedisModule_DictIteratorStop(iter);
+
+ RedisModule_ReplyWithArray(ctx, 4);
+ RedisModule_ReplyWithSimpleString(ctx, "total");
+ RedisModule_ReplyWithLongLong(ctx, size);
+ RedisModule_ReplyWithSimpleString(ctx, "used");
+ RedisModule_ReplyWithLongLong(ctx, used);
+ return REDISMODULE_OK;
+}
+
+/* MEM.ALLOCANDWRITE key block_num block_index data block_index data ... */
+int MemAllocAndWrite_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx);
+
+ if (argc < 3) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ long long block_num;
+ if ((RedisModule_StringToLongLong(argv[2], &block_num) != REDISMODULE_OK) || block_num <= 0) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid block_num: must be a value greater than 0");
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ | REDISMODULE_WRITE);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY && RedisModule_ModuleTypeGetType(key) != MemAllocType) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ MemAllocObject *o;
+ if (type == REDISMODULE_KEYTYPE_EMPTY) {
+ o = createMemAllocObject();
+ RedisModule_ModuleTypeSetValue(key, MemAllocType, o);
+ } else {
+ o = RedisModule_ModuleTypeGetValue(key);
+ }
+
+ struct MemBlock *mem = MemBlockCreate(block_num);
+ RedisModule_Assert(mem != NULL);
+ RedisModule_DictSet(mem_pool[RedisModule_GetSelectedDb(ctx)], argv[1], mem);
+ o->used = 0;
+ o->mask = 0;
+ o->size = block_num;
+
+ int i = 3;
+ long long block_index;
+ for (; i < argc; i++) {
+ /* Argument validity is guaranteed internally (this command is emitted by our own AOF rewrite), so no validation here. */
+ RedisModule_StringToLongLong(argv[i], &block_index);
+ size_t len;
+ const char * buf = RedisModule_StringPtrLen(argv[i + 1], &len);
+ MemBlockWrite(mem, block_index, buf, len);
+ o->used++;
+ o->mask |= (1UL << block_index);
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
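+
+/* MEM.ALLOCANDWRITE is the replay form emitted by MemAllocAofRewrite() below:
+ * a single command reconstructs both the MemAllocObject value and the
+ * corresponding global mem_pool entry during AOF loading. */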
+
+/*---------------------------- type callbacks ------------------------------------*/
+
+void *MemAllocRdbLoad(RedisModuleIO *rdb, int encver) {
+ if (encver != 0) {
+ return NULL;
+ }
+
+ MemAllocObject *o = createMemAllocObject();
+ o->size = RedisModule_LoadSigned(rdb);
+ o->used = RedisModule_LoadSigned(rdb);
+ o->mask = RedisModule_LoadUnsigned(rdb);
+
+ const RedisModuleString *key = RedisModule_GetKeyNameFromIO(rdb);
+ int dbid = RedisModule_GetDbIdFromIO(rdb);
+
+ if (o->size) {
+ size_t size;
+ char *tmpbuf;
+ long long num = o->size;
+ struct MemBlock *head = RedisModule_Calloc(1, sizeof(struct MemBlock));
+ tmpbuf = RedisModule_LoadStringBuffer(rdb, &size);
+ memcpy(head->block, tmpbuf, size > BLOCK_SIZE ? BLOCK_SIZE:size);
+ RedisModule_Free(tmpbuf);
+ struct MemBlock *block = head;
+ while (--num) {
+ block->next = RedisModule_Calloc(1, sizeof(struct MemBlock));
+ block = block->next;
+
+ tmpbuf = RedisModule_LoadStringBuffer(rdb, &size);
+ memcpy(block->block, tmpbuf, size > BLOCK_SIZE ? BLOCK_SIZE:size);
+ RedisModule_Free(tmpbuf);
+ }
+
+ RedisModule_DictSet(mem_pool[dbid], (RedisModuleString *)key, head);
+ }
+
+ return o;
+}
+
+void MemAllocRdbSave(RedisModuleIO *rdb, void *value) {
+ MemAllocObject *o = value;
+ RedisModule_SaveSigned(rdb, o->size);
+ RedisModule_SaveSigned(rdb, o->used);
+ RedisModule_SaveUnsigned(rdb, o->mask);
+
+ const RedisModuleString *key = RedisModule_GetKeyNameFromIO(rdb);
+ int dbid = RedisModule_GetDbIdFromIO(rdb);
+
+ if (o->size) {
+ int nokey;
+ struct MemBlock *mem = (struct MemBlock *)RedisModule_DictGet(mem_pool[dbid], (RedisModuleString *)key, &nokey);
+ RedisModule_Assert(nokey == 0 && mem != NULL);
+
+ struct MemBlock *block = mem;
+ while (block) {
+ RedisModule_SaveStringBuffer(rdb, block->block, BLOCK_SIZE);
+ block = block->next;
+ }
+ }
+}
+
+void MemAllocAofRewrite(RedisModuleIO *aof, RedisModuleString *key, void *value) {
+ MemAllocObject *o = (MemAllocObject *)value;
+ if (o->size) {
+ int dbid = RedisModule_GetDbIdFromIO(aof);
+ int nokey;
+ size_t i = 0, j = 0;
+ struct MemBlock *mem = (struct MemBlock *)RedisModule_DictGet(mem_pool[dbid], (RedisModuleString *)key, &nokey);
+ RedisModule_Assert(nokey == 0 && mem != NULL);
+ size_t array_size = o->size * 2;
+ RedisModuleString ** string_array = RedisModule_Calloc(array_size, sizeof(RedisModuleString *));
+ while (mem) {
+ string_array[i] = RedisModule_CreateStringFromLongLong(NULL, j);
+ string_array[i + 1] = RedisModule_CreateString(NULL, mem->block, BLOCK_SIZE);
+ mem = mem->next;
+ i += 2;
+ j++;
+ }
+ RedisModule_EmitAOF(aof, "mem.allocandwrite", "slv", key, o->size, string_array, array_size);
+ for (i = 0; i < array_size; i++) {
+ RedisModule_FreeString(NULL, string_array[i]);
+ }
+ RedisModule_Free(string_array);
+ } else {
+ RedisModule_EmitAOF(aof, "mem.allocandwrite", "sl", key, o->size);
+ }
+}
+
+void MemAllocFree(void *value) {
+ RedisModule_Free(value);
+}
+
+void MemAllocUnlink(RedisModuleString *key, const void *value) {
+ REDISMODULE_NOT_USED(key);
+ REDISMODULE_NOT_USED(value);
+
+ /* When unlink and unlink2 are both registered, Redis only calls unlink2, so this callback must never run. */
+ RedisModule_Assert(0);
+}
+
+void MemAllocUnlink2(RedisModuleKeyOptCtx *ctx, const void *value) {
+ MemAllocObject *o = (MemAllocObject *)value;
+
+ const RedisModuleString *key = RedisModule_GetKeyNameFromOptCtx(ctx);
+ int dbid = RedisModule_GetDbIdFromOptCtx(ctx);
+
+ if (o->size) {
+ void *oldval;
+ RedisModule_DictDel(mem_pool[dbid], (RedisModuleString *)key, &oldval);
+ RedisModule_Assert(oldval != NULL);
+ MemBlockFree((struct MemBlock *)oldval);
+ }
+}
+
+void MemAllocDigest(RedisModuleDigest *md, void *value) {
+ MemAllocObject *o = (MemAllocObject *)value;
+ RedisModule_DigestAddLongLong(md, o->size);
+ RedisModule_DigestAddLongLong(md, o->used);
+ RedisModule_DigestAddLongLong(md, o->mask);
+
+ int dbid = RedisModule_GetDbIdFromDigest(md);
+ const RedisModuleString *key = RedisModule_GetKeyNameFromDigest(md);
+
+ if (o->size) {
+ int nokey;
+ struct MemBlock *mem = (struct MemBlock *)RedisModule_DictGet(mem_pool[dbid], (RedisModuleString *)key, &nokey);
+ RedisModule_Assert(nokey == 0 && mem != NULL);
+
+ struct MemBlock *block = mem;
+ while (block) {
+ RedisModule_DigestAddStringBuffer(md, (const char *)block->block, BLOCK_SIZE);
+ block = block->next;
+ }
+ }
+}
+
+void *MemAllocCopy2(RedisModuleKeyOptCtx *ctx, const void *value) {
+ const MemAllocObject *old = value;
+ MemAllocObject *new = createMemAllocObject();
+ new->size = old->size;
+ new->used = old->used;
+ new->mask = old->mask;
+
+ int from_dbid = RedisModule_GetDbIdFromOptCtx(ctx);
+ int to_dbid = RedisModule_GetToDbIdFromOptCtx(ctx);
+ const RedisModuleString *fromkey = RedisModule_GetKeyNameFromOptCtx(ctx);
+ const RedisModuleString *tokey = RedisModule_GetToKeyNameFromOptCtx(ctx);
+
+ if (old->size) {
+ int nokey;
+ struct MemBlock *oldmem = (struct MemBlock *)RedisModule_DictGet(mem_pool[from_dbid], (RedisModuleString *)fromkey, &nokey);
+ RedisModule_Assert(nokey == 0 && oldmem != NULL);
+ struct MemBlock *newmem = MemBlockClone(oldmem);
+ RedisModule_Assert(newmem != NULL);
+ RedisModule_DictSet(mem_pool[to_dbid], (RedisModuleString *)tokey, newmem);
+ }
+
+ return new;
+}
+
+size_t MemAllocMemUsage2(RedisModuleKeyOptCtx *ctx, const void *value, size_t sample_size) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(sample_size);
+ uint64_t size = 0;
+ MemAllocObject *o = (MemAllocObject *)value;
+
+ size += sizeof(*o);
+ size += o->size * sizeof(struct MemBlock);
+
+ return size;
+}
+
+size_t MemAllocMemFreeEffort2(RedisModuleKeyOptCtx *ctx, const void *value) {
+ REDISMODULE_NOT_USED(ctx);
+ MemAllocObject *o = (MemAllocObject *)value;
+ return o->size;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "datatype2", 1,REDISMODULE_APIVER_1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ RedisModuleTypeMethods tm = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = MemAllocRdbLoad,
+ .rdb_save = MemAllocRdbSave,
+ .aof_rewrite = MemAllocAofRewrite,
+ .free = MemAllocFree,
+ .digest = MemAllocDigest,
+ .unlink = MemAllocUnlink,
+ // .defrag = MemAllocDefrag, // Tested in defragtest.c
+ .unlink2 = MemAllocUnlink2,
+ .copy2 = MemAllocCopy2,
+ .mem_usage2 = MemAllocMemUsage2,
+ .free_effort2 = MemAllocMemFreeEffort2,
+ };
+
+ MemAllocType = RedisModule_CreateDataType(ctx, "mem_alloc", 0, &tm);
+ if (MemAllocType == NULL) {
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "mem.alloc", MemAlloc_RedisCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "mem.free", MemFree_RedisCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "mem.write", MemWrite_RedisCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "mem.read", MemRead_RedisCommand, "readonly", 1, 1, 1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "mem.usage", MemUsage_RedisCommand, "readonly", 1, 1, 1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ /* used for internal aof rewrite */
+ if (RedisModule_CreateCommand(ctx, "mem.allocandwrite", MemAllocAndWrite_RedisCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ for(int i = 0; i < MAX_DB; i++){
+ mem_pool[i] = RedisModule_CreateDict(NULL);
+ }
+
+ RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_FlushDB, flushdbCallback);
+ RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_SwapDB, swapDbCallback);
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/defragtest.c b/tests/modules/defragtest.c
new file mode 100644
index 0000000..6a02a05
--- /dev/null
+++ b/tests/modules/defragtest.c
@@ -0,0 +1,235 @@
+/* A module that implements defrag callback mechanisms.
+ */
+
+#include "redismodule.h"
+#include <stdlib.h>
+
+static RedisModuleType *FragType;
+
+struct FragObject {
+ unsigned long len;
+ void **values;
+ int maxstep;
+};
+
+/* Make sure we get the expected cursor */
+unsigned long int last_set_cursor = 0;
+
+unsigned long int datatype_attempts = 0;
+unsigned long int datatype_defragged = 0;
+unsigned long int datatype_resumes = 0;
+unsigned long int datatype_wrong_cursor = 0;
+unsigned long int global_attempts = 0;
+unsigned long int global_defragged = 0;
+
+int global_strings_len = 0;
+RedisModuleString **global_strings = NULL;
+
+static void createGlobalStrings(RedisModuleCtx *ctx, int count)
+{
+ global_strings_len = count;
+ global_strings = RedisModule_Alloc(sizeof(RedisModuleString *) * count);
+
+ for (int i = 0; i < count; i++) {
+ global_strings[i] = RedisModule_CreateStringFromLongLong(ctx, i);
+ }
+}
+
+static void defragGlobalStrings(RedisModuleDefragCtx *ctx)
+{
+ for (int i = 0; i < global_strings_len; i++) {
+ RedisModuleString *new = RedisModule_DefragRedisModuleString(ctx, global_strings[i]);
+ global_attempts++;
+ if (new != NULL) {
+ global_strings[i] = new;
+ global_defragged++;
+ }
+ }
+}
+
+static void FragInfo(RedisModuleInfoCtx *ctx, int for_crash_report) {
+ REDISMODULE_NOT_USED(for_crash_report);
+
+ RedisModule_InfoAddSection(ctx, "stats");
+ RedisModule_InfoAddFieldLongLong(ctx, "datatype_attempts", datatype_attempts);
+ RedisModule_InfoAddFieldLongLong(ctx, "datatype_defragged", datatype_defragged);
+ RedisModule_InfoAddFieldLongLong(ctx, "datatype_resumes", datatype_resumes);
+ RedisModule_InfoAddFieldLongLong(ctx, "datatype_wrong_cursor", datatype_wrong_cursor);
+ RedisModule_InfoAddFieldLongLong(ctx, "global_attempts", global_attempts);
+ RedisModule_InfoAddFieldLongLong(ctx, "global_defragged", global_defragged);
+}
+
+struct FragObject *createFragObject(unsigned long len, unsigned long size, int maxstep) {
+ struct FragObject *o = RedisModule_Alloc(sizeof(*o));
+ o->len = len;
+ o->values = RedisModule_Alloc(sizeof(RedisModuleString*) * len);
+ o->maxstep = maxstep;
+
+ for (unsigned long i = 0; i < len; i++) {
+ o->values[i] = RedisModule_Calloc(1, size);
+ }
+
+ return o;
+}
+
+/* FRAG.RESETSTATS */
+static int fragResetStatsCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ datatype_attempts = 0;
+ datatype_defragged = 0;
+ datatype_resumes = 0;
+ datatype_wrong_cursor = 0;
+ global_attempts = 0;
+ global_defragged = 0;
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+/* FRAG.CREATE key len size maxstep */
+static int fragCreateCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 5)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx,argv[1],
+ REDISMODULE_READ|REDISMODULE_WRITE);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY)
+ {
+ return RedisModule_ReplyWithError(ctx, "ERR key exists");
+ }
+
+ long long len;
+ if ((RedisModule_StringToLongLong(argv[2], &len) != REDISMODULE_OK)) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid len");
+ }
+
+ long long size;
+ if ((RedisModule_StringToLongLong(argv[3], &size) != REDISMODULE_OK)) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid size");
+ }
+
+ long long maxstep;
+ if ((RedisModule_StringToLongLong(argv[4], &maxstep) != REDISMODULE_OK)) {
+ return RedisModule_ReplyWithError(ctx, "ERR invalid maxstep");
+ }
+
+ struct FragObject *o = createFragObject(len, size, maxstep);
+ RedisModule_ModuleTypeSetValue(key, FragType, o);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ RedisModule_CloseKey(key);
+
+ return REDISMODULE_OK;
+}
+
+void FragFree(void *value) {
+ struct FragObject *o = value;
+
+ for (unsigned long i = 0; i < o->len; i++)
+ RedisModule_Free(o->values[i]);
+ RedisModule_Free(o->values);
+ RedisModule_Free(o);
+}
+
+size_t FragFreeEffort(RedisModuleString *key, const void *value) {
+ REDISMODULE_NOT_USED(key);
+
+ const struct FragObject *o = value;
+ return o->len;
+}
+
+int FragDefrag(RedisModuleDefragCtx *ctx, RedisModuleString *key, void **value) {
+ REDISMODULE_NOT_USED(key);
+ unsigned long i = 0;
+ int steps = 0;
+
+ int dbid = RedisModule_GetDbIdFromDefragCtx(ctx);
+ RedisModule_Assert(dbid != -1);
+
+ /* Attempt to get the cursor and validate it's what we're expecting */
+ if (RedisModule_DefragCursorGet(ctx, &i) == REDISMODULE_OK) {
+ if (i > 0) datatype_resumes++;
+
+ /* Validate we're expecting this cursor */
+ if (i != last_set_cursor) datatype_wrong_cursor++;
+ } else {
+ if (last_set_cursor != 0) datatype_wrong_cursor++;
+ }
+
+ /* Attempt to defrag the object itself */
+ datatype_attempts++;
+ struct FragObject *o = RedisModule_DefragAlloc(ctx, *value);
+ if (o == NULL) {
+ /* Not defragged */
+ o = *value;
+ } else {
+ /* Defragged */
+ *value = o;
+ datatype_defragged++;
+ }
+
+ /* Deep defrag now */
+ for (; i < o->len; i++) {
+ datatype_attempts++;
+ void *new = RedisModule_DefragAlloc(ctx, o->values[i]);
+ if (new) {
+ o->values[i] = new;
+ datatype_defragged++;
+ }
+
+ if ((o->maxstep && ++steps > o->maxstep) ||
+ ((i % 64 == 0) && RedisModule_DefragShouldStop(ctx)))
+ {
+ RedisModule_DefragCursorSet(ctx, i);
+ last_set_cursor = i;
+ return 1;
+ }
+ }
+
+ last_set_cursor = 0;
+ return 0;
+}
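+
+/* A note on the return value above, sketching the defrag contract as this
+ * module relies on it: returning 1 signals that more work remains and that
+ * the cursor saved with RedisModule_DefragCursorSet() should be handed back
+ * on the next callback; returning 0 signals the key is fully processed, so
+ * the expected cursor is reset to 0. */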
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "defragtest", 1, REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_GetTypeMethodVersion() < REDISMODULE_TYPE_METHOD_VERSION) {
+ return REDISMODULE_ERR;
+ }
+
+ long long glen;
+ if (argc != 1 || RedisModule_StringToLongLong(argv[0], &glen) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ createGlobalStrings(ctx, glen);
+
+ RedisModuleTypeMethods tm = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .free = FragFree,
+ .free_effort = FragFreeEffort,
+ .defrag = FragDefrag
+ };
+
+ FragType = RedisModule_CreateDataType(ctx, "frag_type", 0, &tm);
+ if (FragType == NULL) return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "frag.create",
+ fragCreateCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "frag.resetstats",
+ fragResetStatsCommand, "write deny-oom", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModule_RegisterInfoFunc(ctx, FragInfo);
+ RedisModule_RegisterDefragFunc(ctx, defragGlobalStrings);
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/eventloop.c b/tests/modules/eventloop.c
new file mode 100644
index 0000000..c0cfdf0
--- /dev/null
+++ b/tests/modules/eventloop.c
@@ -0,0 +1,276 @@
+/* This module contains four tests:
+ * 1- test.sanity : Basic tests for argument validation mostly.
+ * 2- test.sendbytes : Creates a pipe and registers its fds to the event loop,
+ * one end of the pipe for read events and the other end for
+ * the write events. On writable event, data is written. On
+ * readable event data is read. Repeated until all data is
+ * received.
+ * 3- test.iteration : A test for BEFORE_SLEEP and AFTER_SLEEP callbacks.
+ * Counters are incremented each time these events are
+ * fired. They should be equal and increment monotonically.
+ * 4- test.oneshot : Test for oneshot API
+ */
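+
+/* A hypothetical driver session against these commands (module path and
+ * byte count are illustrative):
+ *   MODULE LOAD /path/to/eventloop.so
+ *   test.sanity          -- argument validation checks, replies OK
+ *   test.sendbytes 65536 -- blocks until 65536 bytes round-trip the pipe
+ *   test.iteration       -- replies with the BEFORE_SLEEP/AFTER_SLEEP count
+ *   test.oneshot         -- replies OK from the oneshot callback */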
+
+#include "redismodule.h"
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+
+int fds[2];
+long long buf_size;
+char *src;
+long long src_offset;
+char *dst;
+long long dst_offset;
+
+RedisModuleBlockedClient *bc;
+RedisModuleCtx *reply_ctx;
+
+void onReadable(int fd, void *user_data, int mask) {
+ REDISMODULE_NOT_USED(mask);
+
+ RedisModule_Assert(strcmp(user_data, "userdataread") == 0);
+
+ while (1) {
+ int rd = read(fd, dst + dst_offset, buf_size - dst_offset);
+ if (rd <= 0)
+ return;
+ dst_offset += rd;
+
+ /* Received all bytes */
+ if (dst_offset == buf_size) {
+ if (memcmp(src, dst, buf_size) == 0)
+ RedisModule_ReplyWithSimpleString(reply_ctx, "OK");
+ else
+ RedisModule_ReplyWithError(reply_ctx, "ERR bytes mismatch");
+
+ RedisModule_EventLoopDel(fds[0], REDISMODULE_EVENTLOOP_READABLE);
+ RedisModule_EventLoopDel(fds[1], REDISMODULE_EVENTLOOP_WRITABLE);
+ RedisModule_Free(src);
+ RedisModule_Free(dst);
+ close(fds[0]);
+ close(fds[1]);
+
+ RedisModule_FreeThreadSafeContext(reply_ctx);
+ RedisModule_UnblockClient(bc, NULL);
+ return;
+ }
+    }
+}
+
+void onWritable(int fd, void *user_data, int mask) {
+ REDISMODULE_NOT_USED(mask);
+
+ RedisModule_Assert(strcmp(user_data, "userdatawrite") == 0);
+
+ while (1) {
+ /* Check if we sent all data */
+ if (src_offset >= buf_size)
+ return;
+ int written = write(fd, src + src_offset, buf_size - src_offset);
+ if (written <= 0) {
+ return;
+ }
+
+ src_offset += written;
+    }
+}
+
+/* Create a pipe(), register pipe fds to the event loop and send/receive data
+ * using them. */
+int sendbytes(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ if (RedisModule_StringToLongLong(argv[1], &buf_size) != REDISMODULE_OK ||
+ buf_size == 0) {
+ RedisModule_ReplyWithError(ctx, "Invalid integer value");
+ return REDISMODULE_OK;
+ }
+
+ bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ reply_ctx = RedisModule_GetThreadSafeContext(bc);
+
+ /* Allocate source buffer and write some random data */
+ src = RedisModule_Calloc(1,buf_size);
+ src_offset = 0;
+ memset(src, rand() % 0xFF, buf_size);
+ memcpy(src, "randomtestdata", strlen("randomtestdata"));
+
+ dst = RedisModule_Calloc(1,buf_size);
+ dst_offset = 0;
+
+ /* Create a pipe and register it to the event loop. */
+ if (pipe(fds) < 0) return REDISMODULE_ERR;
+ if (fcntl(fds[0], F_SETFL, O_NONBLOCK) < 0) return REDISMODULE_ERR;
+ if (fcntl(fds[1], F_SETFL, O_NONBLOCK) < 0) return REDISMODULE_ERR;
+
+ if (RedisModule_EventLoopAdd(fds[0], REDISMODULE_EVENTLOOP_READABLE,
+ onReadable, "userdataread") != REDISMODULE_OK) return REDISMODULE_ERR;
+ if (RedisModule_EventLoopAdd(fds[1], REDISMODULE_EVENTLOOP_WRITABLE,
+ onWritable, "userdatawrite") != REDISMODULE_OK) return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
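+
+/* Flow of the command above: the client is blocked, the write end fds[1]
+ * fires onWritable() until all of src is written, the read end fds[0] fires
+ * onReadable() until buf_size bytes have arrived, and the blocked client is
+ * then unblocked with OK (or an error on a byte mismatch). */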
+
+int sanity(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (pipe(fds) < 0) return REDISMODULE_ERR;
+
+ if (RedisModule_EventLoopAdd(fds[0], 9999999, onReadable, NULL)
+ == REDISMODULE_OK || errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, "ERR non-existing event type should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopAdd(-1, REDISMODULE_EVENTLOOP_READABLE, onReadable, NULL)
+ == REDISMODULE_OK || errno != ERANGE) {
+ RedisModule_ReplyWithError(ctx, "ERR out of range fd should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopAdd(99999999, REDISMODULE_EVENTLOOP_READABLE, onReadable, NULL)
+ == REDISMODULE_OK || errno != ERANGE) {
+ RedisModule_ReplyWithError(ctx, "ERR out of range fd should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopAdd(fds[0], REDISMODULE_EVENTLOOP_READABLE, NULL, NULL)
+ == REDISMODULE_OK || errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, "ERR null callback should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopAdd(fds[0], 9999999, onReadable, NULL)
+ == REDISMODULE_OK || errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, "ERR non-existing event type should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopDel(fds[0], REDISMODULE_EVENTLOOP_READABLE)
+ != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, "ERR del on non-registered fd should not fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopDel(fds[0], 9999999) == REDISMODULE_OK ||
+ errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, "ERR non-existing event type should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopDel(-1, REDISMODULE_EVENTLOOP_READABLE)
+ == REDISMODULE_OK || errno != ERANGE) {
+ RedisModule_ReplyWithError(ctx, "ERR out of range fd should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopDel(99999999, REDISMODULE_EVENTLOOP_READABLE)
+ == REDISMODULE_OK || errno != ERANGE) {
+ RedisModule_ReplyWithError(ctx, "ERR out of range fd should fail");
+ goto out;
+ }
+ if (RedisModule_EventLoopAdd(fds[0], REDISMODULE_EVENTLOOP_READABLE, onReadable, NULL)
+ != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, "ERR Add failed");
+ goto out;
+ }
+ if (RedisModule_EventLoopAdd(fds[0], REDISMODULE_EVENTLOOP_READABLE, onReadable, NULL)
+ != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, "ERR Adding same fd twice failed");
+ goto out;
+ }
+ if (RedisModule_EventLoopDel(fds[0], REDISMODULE_EVENTLOOP_READABLE)
+ != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, "ERR Del failed");
+ goto out;
+ }
+ if (RedisModule_EventLoopAddOneShot(NULL, NULL) == REDISMODULE_OK || errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, "ERR null callback should fail");
+ goto out;
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+out:
+ close(fds[0]);
+ close(fds[1]);
+ return REDISMODULE_OK;
+}
+
+static long long beforeSleepCount;
+static long long afterSleepCount;
+
+int iteration(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ /* On each event loop iteration, eventloopCallback() is called. We increment
+ * beforeSleepCount and afterSleepCount, so these two should be equal.
+ * We reply with iteration count, caller can test if iteration count
+ * increments monotonically */
+ RedisModule_Assert(beforeSleepCount == afterSleepCount);
+ RedisModule_ReplyWithLongLong(ctx, beforeSleepCount);
+ return REDISMODULE_OK;
+}
+
+void oneshotCallback(void* arg)
+{
+ RedisModule_Assert(strcmp(arg, "userdata") == 0);
+ RedisModule_ReplyWithSimpleString(reply_ctx, "OK");
+ RedisModule_FreeThreadSafeContext(reply_ctx);
+ RedisModule_UnblockClient(bc, NULL);
+}
+
+int oneshot(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+ reply_ctx = RedisModule_GetThreadSafeContext(bc);
+
+ if (RedisModule_EventLoopAddOneShot(oneshotCallback, "userdata") != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "ERR oneshot failed");
+ RedisModule_FreeThreadSafeContext(reply_ctx);
+ RedisModule_UnblockClient(bc, NULL);
+ }
+ return REDISMODULE_OK;
+}
+
+void eventloopCallback(struct RedisModuleCtx *ctx, RedisModuleEvent eid, uint64_t subevent, void *data) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(data);
+
+ RedisModule_Assert(eid.id == REDISMODULE_EVENT_EVENTLOOP);
+ if (subevent == REDISMODULE_SUBEVENT_EVENTLOOP_BEFORE_SLEEP)
+ beforeSleepCount++;
+ else if (subevent == REDISMODULE_SUBEVENT_EVENTLOOP_AFTER_SLEEP)
+ afterSleepCount++;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"eventloop",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ /* Test basics. */
+ if (RedisModule_CreateCommand(ctx, "test.sanity", sanity, "", 0, 0, 0)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ /* Register a command to create a pipe() and send data through it by using
+ * event loop API. */
+ if (RedisModule_CreateCommand(ctx, "test.sendbytes", sendbytes, "", 0, 0, 0)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ /* Register a command to return event loop iteration count. */
+ if (RedisModule_CreateCommand(ctx, "test.iteration", iteration, "", 0, 0, 0)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "test.oneshot", oneshot, "", 0, 0, 0)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_EventLoop,
+ eventloopCallback) != REDISMODULE_OK) return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/fork.c b/tests/modules/fork.c
new file mode 100644
index 0000000..d7a0d15
--- /dev/null
+++ b/tests/modules/fork.c
@@ -0,0 +1,96 @@
+
+/* define macros for having usleep */
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE
+
+#include "redismodule.h"
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+
+#define UNUSED(V) ((void) V)
+
+int child_pid = -1;
+int exited_with_code = -1;
+
+void done_handler(int exitcode, int bysignal, void *user_data) {
+ child_pid = -1;
+    exited_with_code = exitcode;
+ assert(user_data==(void*)0xdeadbeef);
+ UNUSED(bysignal);
+}
+
+int fork_create(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ long long code_to_exit_with;
+ long long usleep_us;
+ if (argc != 3) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ if(!RMAPI_FUNC_SUPPORTED(RedisModule_Fork)){
+ RedisModule_ReplyWithError(ctx, "Fork api is not supported in the current redis version");
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_StringToLongLong(argv[1], &code_to_exit_with);
+ RedisModule_StringToLongLong(argv[2], &usleep_us);
+    exited_with_code = -1;
+ int fork_child_pid = RedisModule_Fork(done_handler, (void*)0xdeadbeef);
+ if (fork_child_pid < 0) {
+ RedisModule_ReplyWithError(ctx, "Fork failed");
+ return REDISMODULE_OK;
+ } else if (fork_child_pid > 0) {
+ /* parent */
+ child_pid = fork_child_pid;
+ RedisModule_ReplyWithLongLong(ctx, child_pid);
+ return REDISMODULE_OK;
+ }
+
+ /* child */
+ RedisModule_Log(ctx, "notice", "fork child started");
+ usleep(usleep_us);
+ RedisModule_Log(ctx, "notice", "fork child exiting");
+ RedisModule_ExitFromChild(code_to_exit_with);
+ /* unreachable */
+ return 0;
+}
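+
+/* A hypothetical use of the command above (values are illustrative):
+ *   fork.create 3 100000
+ * forks a child that sleeps 100ms and exits with code 3; the parent replies
+ * with the child pid, and fork.exitcode returns 3 once done_handler runs. */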
+
+int fork_exitcode(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ UNUSED(argv);
+ UNUSED(argc);
+    RedisModule_ReplyWithLongLong(ctx, exited_with_code);
+ return REDISMODULE_OK;
+}
+
+int fork_kill(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ UNUSED(argv);
+ UNUSED(argc);
+ if (RedisModule_KillForkChild(child_pid) != REDISMODULE_OK)
+ RedisModule_ReplyWithError(ctx, "KillForkChild failed");
+ else
+ RedisModule_ReplyWithLongLong(ctx, 1);
+ child_pid = -1;
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ if (RedisModule_Init(ctx,"fork",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fork.create", fork_create,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fork.exitcode", fork_exitcode,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"fork.kill", fork_kill,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/getchannels.c b/tests/modules/getchannels.c
new file mode 100644
index 0000000..330531d
--- /dev/null
+++ b/tests/modules/getchannels.c
@@ -0,0 +1,69 @@
+#include "redismodule.h"
+#include <strings.h>
+#include <assert.h>
+#include <unistd.h>
+#include <errno.h>
+
+/* A sample with declarable channels, that are used to validate against ACLs */
+int getChannels_subscribe(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if ((argc - 1) % 3 != 0) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ char *err = NULL;
+
+ /* getchannels.command [[subscribe|unsubscribe|publish] [pattern|literal] <channel> ...]
+     * This command marks the given channel as accessed, based on the
+ * provided modifiers. */
+ for (int i = 1; i < argc; i += 3) {
+ const char *operation = RedisModule_StringPtrLen(argv[i], NULL);
+ const char *type = RedisModule_StringPtrLen(argv[i+1], NULL);
+ int flags = 0;
+
+ if (!strcasecmp(operation, "subscribe")) {
+ flags |= REDISMODULE_CMD_CHANNEL_SUBSCRIBE;
+ } else if (!strcasecmp(operation, "unsubscribe")) {
+ flags |= REDISMODULE_CMD_CHANNEL_UNSUBSCRIBE;
+ } else if (!strcasecmp(operation, "publish")) {
+ flags |= REDISMODULE_CMD_CHANNEL_PUBLISH;
+ } else {
+ err = "Invalid channel operation";
+ break;
+ }
+
+ if (!strcasecmp(type, "literal")) {
+ /* No op */
+ } else if (!strcasecmp(type, "pattern")) {
+ flags |= REDISMODULE_CMD_CHANNEL_PATTERN;
+ } else {
+ err = "Invalid channel type";
+ break;
+ }
+ if (RedisModule_IsChannelsPositionRequest(ctx)) {
+ RedisModule_ChannelAtPosWithFlags(ctx, i+2, flags);
+ }
+ }
+
+ if (!RedisModule_IsChannelsPositionRequest(ctx)) {
+ if (err) {
+ RedisModule_ReplyWithError(ctx, err);
+ } else {
+            /* A normal implementation would go here, but for tests we just reply OK */
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ }
+ }
+
+ return REDISMODULE_OK;
+}
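+
+/* A hypothetical invocation (channel names are illustrative):
+ *   getchannels.command subscribe literal ch1 publish pattern news.*
+ * declares ch1 as a literal subscribe channel and news.* as a publish
+ * pattern for ACL validation, then replies OK. */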
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "getchannels", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "getchannels.command", getChannels_subscribe, "getchannels-api", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/getkeys.c b/tests/modules/getkeys.c
new file mode 100644
index 0000000..cee3b3e
--- /dev/null
+++ b/tests/modules/getkeys.c
@@ -0,0 +1,178 @@
+
+#include "redismodule.h"
+#include <strings.h>
+#include <assert.h>
+#include <unistd.h>
+#include <errno.h>
+
+#define UNUSED(V) ((void) V)
+
+/* A sample movable keys command that returns a list of all
+ * arguments that follow a KEY argument, i.e. the declared keys.
+ */
+int getkeys_command(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ int i;
+ int count = 0;
+
+ /* Handle getkeys-api introspection */
+ if (RedisModule_IsKeysPositionRequest(ctx)) {
+ for (i = 0; i < argc; i++) {
+ size_t len;
+ const char *str = RedisModule_StringPtrLen(argv[i], &len);
+
+ if (len == 3 && !strncasecmp(str, "key", 3) && i + 1 < argc)
+ RedisModule_KeyAtPos(ctx, i + 1);
+ }
+
+ return REDISMODULE_OK;
+ }
+
+ /* Handle real command invocation */
+ RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_LEN);
+ for (i = 0; i < argc; i++) {
+ size_t len;
+ const char *str = RedisModule_StringPtrLen(argv[i], &len);
+
+ if (len == 3 && !strncasecmp(str, "key", 3) && i + 1 < argc) {
+ RedisModule_ReplyWithString(ctx, argv[i+1]);
+ count++;
+ }
+ }
+ RedisModule_ReplySetArrayLength(ctx, count);
+
+ return REDISMODULE_OK;
+}
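+
+/* A hypothetical invocation (key names are illustrative):
+ *   getkeys.command x KEY k1 y KEY k2
+ * declares k1 and k2 during getkeys-api introspection, and replies with the
+ * array [k1, k2] when actually executed. */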
+
+int getkeys_command_with_flags(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ int i;
+ int count = 0;
+
+ /* Handle getkeys-api introspection */
+ if (RedisModule_IsKeysPositionRequest(ctx)) {
+ for (i = 0; i < argc; i++) {
+ size_t len;
+ const char *str = RedisModule_StringPtrLen(argv[i], &len);
+
+ if (len == 3 && !strncasecmp(str, "key", 3) && i + 1 < argc)
+ RedisModule_KeyAtPosWithFlags(ctx, i + 1, REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS);
+ }
+
+ return REDISMODULE_OK;
+ }
+
+ /* Handle real command invocation */
+ RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_LEN);
+ for (i = 0; i < argc; i++) {
+ size_t len;
+ const char *str = RedisModule_StringPtrLen(argv[i], &len);
+
+ if (len == 3 && !strncasecmp(str, "key", 3) && i + 1 < argc) {
+ RedisModule_ReplyWithString(ctx, argv[i+1]);
+ count++;
+ }
+ }
+ RedisModule_ReplySetArrayLength(ctx, count);
+
+ return REDISMODULE_OK;
+}
+
+int getkeys_fixed(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ int i;
+
+ RedisModule_ReplyWithArray(ctx, argc - 1);
+ for (i = 1; i < argc; i++) {
+ RedisModule_ReplyWithString(ctx, argv[i]);
+ }
+ return REDISMODULE_OK;
+}
+
+/* Introspect a command using RM_GetCommandKeys() and returns the list
+ * of keys. Essentially this is COMMAND GETKEYS implemented in a module.
+ * INTROSPECT <with-flags> <cmd> <args>
+ */
+int getkeys_introspect(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ long long with_flags = 0;
+
+ if (argc < 4) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ if (RedisModule_StringToLongLong(argv[1],&with_flags) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx,"ERR invalid integer");
+
+ int num_keys, *keyflags = NULL;
+ int *keyidx = RedisModule_GetCommandKeysWithFlags(ctx, &argv[2], argc - 2, &num_keys, with_flags ? &keyflags : NULL);
+
+ if (!keyidx) {
+ if (!errno)
+ RedisModule_ReplyWithEmptyArray(ctx);
+ else {
+ char err[100];
+ switch (errno) {
+ case ENOENT:
+ RedisModule_ReplyWithError(ctx, "ERR ENOENT");
+ break;
+ case EINVAL:
+ RedisModule_ReplyWithError(ctx, "ERR EINVAL");
+ break;
+ default:
+ snprintf(err, sizeof(err) - 1, "ERR errno=%d", errno);
+ RedisModule_ReplyWithError(ctx, err);
+ break;
+ }
+ }
+ } else {
+ int i;
+
+ RedisModule_ReplyWithArray(ctx, num_keys);
+ for (i = 0; i < num_keys; i++) {
+ if (!with_flags) {
+ RedisModule_ReplyWithString(ctx, argv[2 + keyidx[i]]);
+ continue;
+ }
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithString(ctx, argv[2 + keyidx[i]]);
+ char* sflags = "";
+ if (keyflags[i] & REDISMODULE_CMD_KEY_RO)
+ sflags = "RO";
+ else if (keyflags[i] & REDISMODULE_CMD_KEY_RW)
+ sflags = "RW";
+ else if (keyflags[i] & REDISMODULE_CMD_KEY_OW)
+ sflags = "OW";
+ else if (keyflags[i] & REDISMODULE_CMD_KEY_RM)
+ sflags = "RM";
+ RedisModule_ReplyWithCString(ctx, sflags);
+ }
+
+ RedisModule_Free(keyidx);
+ RedisModule_Free(keyflags);
+ }
+
+ return REDISMODULE_OK;
+}
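+
+/* A hypothetical invocation of the command above:
+ *   getkeys.introspect 0 get mykey
+ * extracts the keys of GET via RM_GetCommandKeys() and replies with [mykey];
+ * passing 1 as <with-flags> pairs each key with an RO/RW/OW/RM flag string. */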
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ if (RedisModule_Init(ctx,"getkeys",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"getkeys.command", getkeys_command,"getkeys-api",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"getkeys.command_with_flags", getkeys_command_with_flags,"getkeys-api",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"getkeys.fixed", getkeys_fixed,"",2,4,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"getkeys.introspect", getkeys_introspect,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/hash.c b/tests/modules/hash.c
new file mode 100644
index 0000000..001a34e
--- /dev/null
+++ b/tests/modules/hash.c
@@ -0,0 +1,90 @@
+#include "redismodule.h"
+#include <strings.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* If a string is ":delete:", the special value for deleted hash fields is
+ * returned; otherwise the input string is returned. */
+static RedisModuleString *value_or_delete(RedisModuleString *s) {
+ if (!strcasecmp(RedisModule_StringPtrLen(s, NULL), ":delete:"))
+ return REDISMODULE_HASH_DELETE;
+ else
+ return s;
+}
+
+/* HASH.SET key flags field1 value1 [field2 value2 ..]
+ *
+ * Sets 1-4 fields. Returns the same as RedisModule_HashSet().
+ * Flags is a string of "nxa" where n = NX, x = XX, a = COUNT_ALL.
+ * To delete a field, use the value ":delete:".
+ */
+int hash_set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 5 || argc % 2 == 0 || argc > 11)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModule_AutoMemory(ctx);
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+
+ size_t flags_len;
+ const char *flags_str = RedisModule_StringPtrLen(argv[2], &flags_len);
+ int flags = REDISMODULE_HASH_NONE;
+ for (size_t i = 0; i < flags_len; i++) {
+ switch (flags_str[i]) {
+ case 'n': flags |= REDISMODULE_HASH_NX; break;
+ case 'x': flags |= REDISMODULE_HASH_XX; break;
+ case 'a': flags |= REDISMODULE_HASH_COUNT_ALL; break;
+ }
+ }
+
+ /* Test some varargs. (In real-world, use a loop and set one at a time.) */
+ int result;
+ errno = 0;
+ if (argc == 5) {
+ result = RedisModule_HashSet(key, flags,
+ argv[3], value_or_delete(argv[4]),
+ NULL);
+ } else if (argc == 7) {
+ result = RedisModule_HashSet(key, flags,
+ argv[3], value_or_delete(argv[4]),
+ argv[5], value_or_delete(argv[6]),
+ NULL);
+ } else if (argc == 9) {
+ result = RedisModule_HashSet(key, flags,
+ argv[3], value_or_delete(argv[4]),
+ argv[5], value_or_delete(argv[6]),
+ argv[7], value_or_delete(argv[8]),
+ NULL);
+ } else if (argc == 11) {
+ result = RedisModule_HashSet(key, flags,
+ argv[3], value_or_delete(argv[4]),
+ argv[5], value_or_delete(argv[6]),
+ argv[7], value_or_delete(argv[8]),
+ argv[9], value_or_delete(argv[10]),
+ NULL);
+ } else {
+ return RedisModule_ReplyWithError(ctx, "ERR too many fields");
+ }
+
+ /* Check errno */
+ if (result == 0) {
+ if (errno == ENOTSUP)
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ else
+ RedisModule_Assert(errno == ENOENT);
+ }
+
+ return RedisModule_ReplyWithLongLong(ctx, result);
+}
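+
+/* Hypothetical invocations of the command above (names are illustrative):
+ *   HASH.SET h1 n f1 v1       -- sets f1 only if it does not exist ("n" -> NX)
+ *   HASH.SET h1 a f2 :delete: -- deletes f2, counting all ops ("a" -> COUNT_ALL)
+ * Each replies with whatever RedisModule_HashSet() returned. */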
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "hash", 1, REDISMODULE_APIVER_1) ==
+ REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "hash.set", hash_set, "write",
+ 1, 1, 1) == REDISMODULE_OK) {
+ return REDISMODULE_OK;
+ } else {
+ return REDISMODULE_ERR;
+ }
+}
diff --git a/tests/modules/hooks.c b/tests/modules/hooks.c
new file mode 100644
index 0000000..fc357d1
--- /dev/null
+++ b/tests/modules/hooks.c
@@ -0,0 +1,516 @@
+/* This module is used to test the server events hooks API.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2019, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "redismodule.h"
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <assert.h>
+
+/* We need to store events to be able to test and see what we got, and we can't
+ * store them in the key-space since that would mess up rdb loading (duplicates)
+ * and would be lost on FLUSHDB. */
+RedisModuleDict *event_log = NULL;
+/* stores all the keys on which we got a 'removed' event */
+RedisModuleDict *removed_event_log = NULL;
+/* stores the subevent type for each key on which we got a 'removed' event */
+RedisModuleDict *removed_subevent_type = NULL;
+/* stores the expiry information for each key on which we got a 'removed' event */
+RedisModuleDict *removed_expiry_log = NULL;
+
+typedef struct EventElement {
+ long count;
+ RedisModuleString *last_val_string;
+ long last_val_int;
+} EventElement;
+
+void LogStringEvent(RedisModuleCtx *ctx, const char* keyname, const char* data) {
+ EventElement *event = RedisModule_DictGetC(event_log, (void*)keyname, strlen(keyname), NULL);
+ if (!event) {
+ event = RedisModule_Alloc(sizeof(EventElement));
+ memset(event, 0, sizeof(EventElement));
+ RedisModule_DictSetC(event_log, (void*)keyname, strlen(keyname), event);
+ }
+ if (event->last_val_string) RedisModule_FreeString(ctx, event->last_val_string);
+ event->last_val_string = RedisModule_CreateString(ctx, data, strlen(data));
+ event->count++;
+}
+
+void LogNumericEvent(RedisModuleCtx *ctx, const char* keyname, long data) {
+ REDISMODULE_NOT_USED(ctx);
+ EventElement *event = RedisModule_DictGetC(event_log, (void*)keyname, strlen(keyname), NULL);
+ if (!event) {
+ event = RedisModule_Alloc(sizeof(EventElement));
+ memset(event, 0, sizeof(EventElement));
+ RedisModule_DictSetC(event_log, (void*)keyname, strlen(keyname), event);
+ }
+ event->last_val_int = data;
+ event->count++;
+}
+
+void FreeEvent(RedisModuleCtx *ctx, EventElement *event) {
+ if (event->last_val_string)
+ RedisModule_FreeString(ctx, event->last_val_string);
+ RedisModule_Free(event);
+}
+
+int cmdEventCount(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ EventElement *event = RedisModule_DictGet(event_log, argv[1], NULL);
+ RedisModule_ReplyWithLongLong(ctx, event? event->count: 0);
+ return REDISMODULE_OK;
+}
+
+int cmdEventLast(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ EventElement *event = RedisModule_DictGet(event_log, argv[1], NULL);
+ if (event && event->last_val_string)
+ RedisModule_ReplyWithString(ctx, event->last_val_string);
+ else if (event)
+ RedisModule_ReplyWithLongLong(ctx, event->last_val_int);
+ else
+ RedisModule_ReplyWithNull(ctx);
+ return REDISMODULE_OK;
+}
+
+void clearEvents(RedisModuleCtx *ctx)
+{
+ RedisModuleString *key;
+ EventElement *event;
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStart(event_log, "^", NULL);
+ while((key = RedisModule_DictNext(ctx, iter, (void**)&event)) != NULL) {
+ event->count = 0;
+ event->last_val_int = 0;
+ if (event->last_val_string) RedisModule_FreeString(ctx, event->last_val_string);
+ event->last_val_string = NULL;
+ RedisModule_DictDel(event_log, key, NULL);
+ RedisModule_Free(event);
+ }
+ RedisModule_DictIteratorStop(iter);
+}
+
+int cmdEventsClear(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argc);
+ REDISMODULE_NOT_USED(argv);
+ clearEvents(ctx);
+ return REDISMODULE_OK;
+}
+
+/* Client state change callback. */
+void clientChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+
+ RedisModuleClientInfo *ci = data;
+ char *keyname = (sub == REDISMODULE_SUBEVENT_CLIENT_CHANGE_CONNECTED) ?
+ "client-connected" : "client-disconnected";
+ LogNumericEvent(ctx, keyname, ci->id);
+}
+
+void flushdbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+
+ RedisModuleFlushInfo *fi = data;
+ char *keyname = (sub == REDISMODULE_SUBEVENT_FLUSHDB_START) ?
+ "flush-start" : "flush-end";
+ LogNumericEvent(ctx, keyname, fi->dbnum);
+}
+
+void roleChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+
+ RedisModuleReplicationInfo *ri = data;
+ char *keyname = (sub == REDISMODULE_EVENT_REPLROLECHANGED_NOW_MASTER) ?
+ "role-master" : "role-replica";
+ LogStringEvent(ctx, keyname, ri->masterhost);
+}
+
+void replicationChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(data);
+
+ char *keyname = (sub == REDISMODULE_SUBEVENT_REPLICA_CHANGE_ONLINE) ?
+ "replica-online" : "replica-offline";
+ LogNumericEvent(ctx, keyname, 0);
+}
+
+void masterLinkChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(data);
+
+ char *keyname = (sub == REDISMODULE_SUBEVENT_MASTER_LINK_UP) ?
+ "masterlink-up" : "masterlink-down";
+ LogNumericEvent(ctx, keyname, 0);
+}
+
+void persistenceCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(data);
+
+ char *keyname = NULL;
+ switch (sub) {
+ case REDISMODULE_SUBEVENT_PERSISTENCE_RDB_START: keyname = "persistence-rdb-start"; break;
+ case REDISMODULE_SUBEVENT_PERSISTENCE_AOF_START: keyname = "persistence-aof-start"; break;
+ case REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_AOF_START: keyname = "persistence-syncaof-start"; break;
+ case REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_RDB_START: keyname = "persistence-syncrdb-start"; break;
+ case REDISMODULE_SUBEVENT_PERSISTENCE_ENDED: keyname = "persistence-end"; break;
+ case REDISMODULE_SUBEVENT_PERSISTENCE_FAILED: keyname = "persistence-failed"; break;
+ }
+ /* modifying the keyspace from the fork child is not an option, using log instead */
+ RedisModule_Log(ctx, "warning", "module-event-%s", keyname);
+ if (sub == REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_RDB_START ||
+ sub == REDISMODULE_SUBEVENT_PERSISTENCE_SYNC_AOF_START)
+ {
+ LogNumericEvent(ctx, keyname, 0);
+ }
+}
+
+void loadingCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(data);
+
+ char *keyname = NULL;
+ switch (sub) {
+ case REDISMODULE_SUBEVENT_LOADING_RDB_START: keyname = "loading-rdb-start"; break;
+ case REDISMODULE_SUBEVENT_LOADING_AOF_START: keyname = "loading-aof-start"; break;
+ case REDISMODULE_SUBEVENT_LOADING_REPL_START: keyname = "loading-repl-start"; break;
+ case REDISMODULE_SUBEVENT_LOADING_ENDED: keyname = "loading-end"; break;
+ case REDISMODULE_SUBEVENT_LOADING_FAILED: keyname = "loading-failed"; break;
+ }
+ LogNumericEvent(ctx, keyname, 0);
+}
+
+void loadingProgressCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+
+ RedisModuleLoadingProgress *ei = data;
+ char *keyname = (sub == REDISMODULE_SUBEVENT_LOADING_PROGRESS_RDB) ?
+ "loading-progress-rdb" : "loading-progress-aof";
+ LogNumericEvent(ctx, keyname, ei->progress);
+}
+
+void shutdownCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(data);
+ REDISMODULE_NOT_USED(sub);
+
+ RedisModule_Log(ctx, "warning", "module-event-%s", "shutdown");
+}
+
+void cronLoopCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(sub);
+
+ RedisModuleCronLoop *ei = data;
+ LogNumericEvent(ctx, "cron-loop", ei->hz);
+}
+
+void moduleChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+
+ RedisModuleModuleChange *ei = data;
+ char *keyname = (sub == REDISMODULE_SUBEVENT_MODULE_LOADED) ?
+ "module-loaded" : "module-unloaded";
+ LogStringEvent(ctx, keyname, ei->module_name);
+}
+
+void swapDbCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(sub);
+
+ RedisModuleSwapDbInfo *ei = data;
+ LogNumericEvent(ctx, "swapdb-first", ei->dbnum_first);
+ LogNumericEvent(ctx, "swapdb-second", ei->dbnum_second);
+}
+
+void configChangeCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ if (sub != REDISMODULE_SUBEVENT_CONFIG_CHANGE) {
+ return;
+ }
+
+ RedisModuleConfigChangeV1 *ei = data;
+ LogNumericEvent(ctx, "config-change-count", ei->num_changes);
+ LogStringEvent(ctx, "config-change-first", ei->config_names[0]);
+}
+
+void keyInfoCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+
+ RedisModuleKeyInfoV1 *ei = data;
+ RedisModuleKey *kp = ei->key;
+ RedisModuleString *key = (RedisModuleString *) RedisModule_GetKeyNameFromModuleKey(kp);
+ const char *keyname = RedisModule_StringPtrLen(key, NULL);
+ RedisModuleString *event_keyname = RedisModule_CreateStringPrintf(ctx, "key-info-%s", keyname);
+ LogStringEvent(ctx, RedisModule_StringPtrLen(event_keyname, NULL), keyname);
+ RedisModule_FreeString(ctx, event_keyname);
+
+ /* Despite getting a key object from the callback, we also try to re-open it
+ * to make sure the callback is called before it is actually removed from the keyspace. */
+ RedisModuleKey *kp_open = RedisModule_OpenKey(ctx, key, REDISMODULE_READ);
+ assert(RedisModule_ValueLength(kp) == RedisModule_ValueLength(kp_open));
+ RedisModule_CloseKey(kp_open);
+
+ /* We also try to RM_Call a command that accesses that key, also to make sure it's still in the keyspace. */
+ char *size_command = NULL;
+ int key_type = RedisModule_KeyType(kp);
+ if (key_type == REDISMODULE_KEYTYPE_STRING) {
+ size_command = "STRLEN";
+ } else if (key_type == REDISMODULE_KEYTYPE_LIST) {
+ size_command = "LLEN";
+ } else if (key_type == REDISMODULE_KEYTYPE_HASH) {
+ size_command = "HLEN";
+ } else if (key_type == REDISMODULE_KEYTYPE_SET) {
+ size_command = "SCARD";
+ } else if (key_type == REDISMODULE_KEYTYPE_ZSET) {
+ size_command = "ZCARD";
+ } else if (key_type == REDISMODULE_KEYTYPE_STREAM) {
+ size_command = "XLEN";
+ }
+ if (size_command != NULL) {
+ RedisModuleCallReply *reply = RedisModule_Call(ctx, size_command, "s", key);
+ assert(reply != NULL);
+ assert(RedisModule_ValueLength(kp) == (size_t) RedisModule_CallReplyInteger(reply));
+ RedisModule_FreeCallReply(reply);
+ }
+
+ /* Now use the key object we got from the callback for various validations. */
+ RedisModuleString *prev = RedisModule_DictGetC(removed_event_log, (void*)keyname, strlen(keyname), NULL);
+ /* We keep object length */
+ RedisModuleString *v = RedisModule_CreateStringPrintf(ctx, "%zd", RedisModule_ValueLength(kp));
+ /* For string type, we keep value instead of length */
+ if (RedisModule_KeyType(kp) == REDISMODULE_KEYTYPE_STRING) {
+ RedisModule_FreeString(ctx, v);
+ size_t len;
+ /* We need to access the string value with RedisModule_StringDMA.
+ * RedisModule_StringDMA may call dbUnshareStringValue to free the origin object,
+ * so we also can test it. */
+ char *s = RedisModule_StringDMA(kp, &len, REDISMODULE_READ);
+ v = RedisModule_CreateString(ctx, s, len);
+ }
+ RedisModule_DictReplaceC(removed_event_log, (void*)keyname, strlen(keyname), v);
+ if (prev != NULL) {
+ RedisModule_FreeString(ctx, prev);
+ }
+
+ const char *subevent = "deleted";
+ if (sub == REDISMODULE_SUBEVENT_KEY_EXPIRED) {
+ subevent = "expired";
+ } else if (sub == REDISMODULE_SUBEVENT_KEY_EVICTED) {
+ subevent = "evicted";
+ } else if (sub == REDISMODULE_SUBEVENT_KEY_OVERWRITTEN) {
+ subevent = "overwritten";
+ }
+ RedisModule_DictReplaceC(removed_subevent_type, (void*)keyname, strlen(keyname), (void *)subevent);
+
+ RedisModuleString *prevexpire = RedisModule_DictGetC(removed_expiry_log, (void*)keyname, strlen(keyname), NULL);
+ RedisModuleString *expire = RedisModule_CreateStringPrintf(ctx, "%lld", RedisModule_GetAbsExpire(kp));
+ RedisModule_DictReplaceC(removed_expiry_log, (void*)keyname, strlen(keyname), (void *)expire);
+ if (prevexpire != NULL) {
+ RedisModule_FreeString(ctx, prevexpire);
+ }
+}
+
+static int cmdIsKeyRemoved(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char *key = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleString *value = RedisModule_DictGetC(removed_event_log, (void*)key, strlen(key), NULL);
+
+ if (value == NULL) {
+ return RedisModule_ReplyWithError(ctx, "ERR Key was not removed");
+ }
+
+ const char *subevent = RedisModule_DictGetC(removed_subevent_type, (void*)key, strlen(key), NULL);
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithString(ctx, value);
+ RedisModule_ReplyWithSimpleString(ctx, subevent);
+
+ return REDISMODULE_OK;
+}
+
+static int cmdKeyExpiry(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* key = RedisModule_StringPtrLen(argv[1], NULL);
+ RedisModuleString *expire = RedisModule_DictGetC(removed_expiry_log, (void*)key, strlen(key), NULL);
+ if (expire == NULL) {
+ return RedisModule_ReplyWithError(ctx, "ERR Key was not removed");
+ }
+ RedisModule_ReplyWithString(ctx, expire);
+ return REDISMODULE_OK;
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+#define VerifySubEventSupported(e, s) \
+ if (!RedisModule_IsSubEventSupported(e, s)) { \
+ return REDISMODULE_ERR; \
+ }
+
+ if (RedisModule_Init(ctx,"testhook",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ /* Example on how to check if a server sub event is supported */
+ if (!RedisModule_IsSubEventSupported(RedisModuleEvent_ReplicationRoleChanged, REDISMODULE_EVENT_REPLROLECHANGED_NOW_MASTER)) {
+ return REDISMODULE_ERR;
+ }
+
+ /* replication related hooks */
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_ReplicationRoleChanged, roleChangeCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_ReplicaChange, replicationChangeCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+        RedisModuleEvent_MasterLinkChange, masterLinkChangeCallback);
+
+ /* persistence related hooks */
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_Persistence, persistenceCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_Loading, loadingCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_LoadingProgress, loadingProgressCallback);
+
+ /* other hooks */
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_ClientChange, clientChangeCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_FlushDB, flushdbCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_Shutdown, shutdownCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_CronLoop, cronLoopCallback);
+
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_ModuleChange, moduleChangeCallback);
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_SwapDB, swapDbCallback);
+
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_Config, configChangeCallback);
+
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_Key, keyInfoCallback);
+
+ event_log = RedisModule_CreateDict(ctx);
+ removed_event_log = RedisModule_CreateDict(ctx);
+ removed_subevent_type = RedisModule_CreateDict(ctx);
+ removed_expiry_log = RedisModule_CreateDict(ctx);
+
+ if (RedisModule_CreateCommand(ctx,"hooks.event_count", cmdEventCount,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"hooks.event_last", cmdEventLast,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"hooks.clear", cmdEventsClear,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"hooks.is_key_removed", cmdIsKeyRemoved,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"hooks.pexpireat", cmdKeyExpiry,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (argc == 1) {
+ const char *ptr = RedisModule_StringPtrLen(argv[0], NULL);
+ if (!strcasecmp(ptr, "noload")) {
+            /* The "noload" argument hints that we should return ERR at the last moment of OnLoad. */
+ RedisModule_FreeDict(ctx, event_log);
+ RedisModule_FreeDict(ctx, removed_event_log);
+ RedisModule_FreeDict(ctx, removed_subevent_type);
+ RedisModule_FreeDict(ctx, removed_expiry_log);
+ return REDISMODULE_ERR;
+ }
+ }
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ clearEvents(ctx);
+ RedisModule_FreeDict(ctx, event_log);
+ event_log = NULL;
+
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(removed_event_log, "^", NULL, 0);
+ char* key;
+ size_t keyLen;
+ RedisModuleString* val;
+ while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){
+ RedisModule_FreeString(ctx, val);
+ }
+    RedisModule_DictIteratorStop(iter);
+    RedisModule_FreeDict(ctx, removed_event_log);
+ removed_event_log = NULL;
+
+ RedisModule_FreeDict(ctx, removed_subevent_type);
+ removed_subevent_type = NULL;
+
+ iter = RedisModule_DictIteratorStartC(removed_expiry_log, "^", NULL, 0);
+ while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){
+ RedisModule_FreeString(ctx, val);
+ }
+    RedisModule_DictIteratorStop(iter);
+    RedisModule_FreeDict(ctx, removed_expiry_log);
+ removed_expiry_log = NULL;
+
+ return REDISMODULE_OK;
+}
+
diff --git a/tests/modules/infotest.c b/tests/modules/infotest.c
new file mode 100644
index 0000000..87a89dc
--- /dev/null
+++ b/tests/modules/infotest.c
@@ -0,0 +1,119 @@
+#include "redismodule.h"
+
+#include <string.h>
+
+void InfoFunc(RedisModuleInfoCtx *ctx, int for_crash_report) {
+ RedisModule_InfoAddSection(ctx, "");
+ RedisModule_InfoAddFieldLongLong(ctx, "global", -2);
+ RedisModule_InfoAddFieldULongLong(ctx, "uglobal", (unsigned long long)-2);
+
+ RedisModule_InfoAddSection(ctx, "Spanish");
+ RedisModule_InfoAddFieldCString(ctx, "uno", "one");
+ RedisModule_InfoAddFieldLongLong(ctx, "dos", 2);
+
+ RedisModule_InfoAddSection(ctx, "Italian");
+ RedisModule_InfoAddFieldLongLong(ctx, "due", 2);
+ RedisModule_InfoAddFieldDouble(ctx, "tre", 3.3);
+
+ RedisModule_InfoAddSection(ctx, "keyspace");
+ RedisModule_InfoBeginDictField(ctx, "db0");
+ RedisModule_InfoAddFieldLongLong(ctx, "keys", 3);
+ RedisModule_InfoAddFieldLongLong(ctx, "expires", 1);
+ RedisModule_InfoEndDictField(ctx);
+
+ RedisModule_InfoAddSection(ctx, "unsafe");
+ RedisModule_InfoBeginDictField(ctx, "unsafe:field");
+ RedisModule_InfoAddFieldLongLong(ctx, "value", 1);
+ RedisModule_InfoEndDictField(ctx);
+
+ if (for_crash_report) {
+ RedisModule_InfoAddSection(ctx, "Klingon");
+ RedisModule_InfoAddFieldCString(ctx, "one", "wa’");
+ RedisModule_InfoAddFieldCString(ctx, "two", "cha’");
+ RedisModule_InfoAddFieldCString(ctx, "three", "wej");
+ }
+
+}
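+
+/* Assuming the usual INFO rendering for module sections (fields prefixed
+ * with the module name), the function above should produce output shaped
+ * roughly like:
+ *   # infotest_Spanish
+ *   infotest_uno:one
+ *   infotest_dos:2
+ * which the info.get* commands below read back via RM_GetServerInfo(). */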
+
+int info_get(RedisModuleCtx *ctx, RedisModuleString **argv, int argc, char field_type)
+{
+ if (argc != 3 && argc != 4) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ int err = REDISMODULE_OK;
+ const char *section, *field;
+ section = RedisModule_StringPtrLen(argv[1], NULL);
+ field = RedisModule_StringPtrLen(argv[2], NULL);
+ RedisModuleServerInfoData *info = RedisModule_GetServerInfo(ctx, section);
+ if (field_type=='i') {
+ long long ll = RedisModule_ServerInfoGetFieldSigned(info, field, &err);
+ if (err==REDISMODULE_OK)
+ RedisModule_ReplyWithLongLong(ctx, ll);
+ } else if (field_type=='u') {
+ unsigned long long ll = (unsigned long long)RedisModule_ServerInfoGetFieldUnsigned(info, field, &err);
+ if (err==REDISMODULE_OK)
+ RedisModule_ReplyWithLongLong(ctx, ll);
+ } else if (field_type=='d') {
+ double d = RedisModule_ServerInfoGetFieldDouble(info, field, &err);
+ if (err==REDISMODULE_OK)
+ RedisModule_ReplyWithDouble(ctx, d);
+ } else if (field_type=='c') {
+ const char *str = RedisModule_ServerInfoGetFieldC(info, field);
+ if (str)
+ RedisModule_ReplyWithCString(ctx, str);
+ } else {
+ RedisModuleString *str = RedisModule_ServerInfoGetField(ctx, info, field);
+ if (str) {
+ RedisModule_ReplyWithString(ctx, str);
+ RedisModule_FreeString(ctx, str);
+ } else
+ err=REDISMODULE_ERR;
+ }
+ if (err!=REDISMODULE_OK)
+ RedisModule_ReplyWithError(ctx, "not found");
+ RedisModule_FreeServerInfo(ctx, info);
+ return REDISMODULE_OK;
+}
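+
+/* A hypothetical call to one of the typed wrappers below (section and field
+ * are illustrative): INFO.GETI server uptime_in_seconds fetches the "server"
+ * section with RM_GetServerInfo() and replies with the field parsed as a
+ * signed integer. */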
+
+int info_gets(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ return info_get(ctx, argv, argc, 's');
+}
+
+int info_getc(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ return info_get(ctx, argv, argc, 'c');
+}
+
+int info_geti(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ return info_get(ctx, argv, argc, 'i');
+}
+
+int info_getu(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ return info_get(ctx, argv, argc, 'u');
+}
+
+int info_getd(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ return info_get(ctx, argv, argc, 'd');
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx,"infotest",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_RegisterInfoFunc(ctx, InfoFunc) == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"info.gets", info_gets,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"info.getc", info_getc,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"info.geti", info_geti,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"info.getu", info_getu,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"info.getd", info_getd,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/keyspace_events.c b/tests/modules/keyspace_events.c
new file mode 100644
index 0000000..1a284b5
--- /dev/null
+++ b/tests/modules/keyspace_events.c
@@ -0,0 +1,440 @@
+/* This module is used to test the server keyspace events API.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2020, Meir Shpilraien <meir at redislabs dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE /* For usleep */
+
+#include "redismodule.h"
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <unistd.h>
+
+ustime_t cached_time = 0;
+
+/** stores all the keys on which we got a 'loaded' keyspace notification **/
+RedisModuleDict *loaded_event_log = NULL;
+/** stores all the keys on which we got a 'module' keyspace notification **/
+RedisModuleDict *module_event_log = NULL;
+
+/** Counts how many 'del' keyspace notifications (KSN) we got on keys prefixed with "count_dels_" **/
+static size_t dels = 0;
+
+static int KeySpace_NotificationLoaded(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
+ REDISMODULE_NOT_USED(type);
+
+ if(strcmp(event, "loaded") == 0){
+ const char* keyName = RedisModule_StringPtrLen(key, NULL);
+ int nokey;
+ RedisModule_DictGetC(loaded_event_log, (void*)keyName, strlen(keyName), &nokey);
+ if(nokey){
+ RedisModule_DictSetC(loaded_event_log, (void*)keyName, strlen(keyName), RedisModule_HoldString(ctx, key));
+ }
+ }
+
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NotificationGeneric(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ const char *key_str = RedisModule_StringPtrLen(key, NULL);
+ if (strncmp(key_str, "count_dels_", 11) == 0 && strcmp(event, "del") == 0) {
+ if (RedisModule_GetContextFlags(ctx) & REDISMODULE_CTX_FLAGS_MASTER) {
+ dels++;
+ RedisModule_Replicate(ctx, "keyspace.incr_dels", "");
+ }
+ return REDISMODULE_OK;
+ }
+ if (cached_time) {
+ RedisModule_Assert(cached_time == RedisModule_CachedMicroseconds());
+ usleep(1);
+ RedisModule_Assert(cached_time != RedisModule_Microseconds());
+ }
+
+ if (strcmp(event, "del") == 0) {
+ RedisModuleString *copykey = RedisModule_CreateStringPrintf(ctx, "%s_copy", RedisModule_StringPtrLen(key, NULL));
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "DEL", "s!", copykey);
+ RedisModule_FreeString(ctx, copykey);
+ RedisModule_FreeCallReply(rep);
+
+ int ctx_flags = RedisModule_GetContextFlags(ctx);
+ if (ctx_flags & REDISMODULE_CTX_FLAGS_LUA) {
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "c", "lua");
+ RedisModule_FreeCallReply(rep);
+ }
+ if (ctx_flags & REDISMODULE_CTX_FLAGS_MULTI) {
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "c", "multi");
+ RedisModule_FreeCallReply(rep);
+ }
+ }
+
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NotificationExpired(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+ REDISMODULE_NOT_USED(key);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "c!", "testkeyspace:expired");
+ RedisModule_FreeCallReply(rep);
+
+ return REDISMODULE_OK;
+}
+
+/* This key miss notification handler performs a write command inside the
+ * notification callback. Note that doing so is discouraged, and currently wrong,
+ * inside a key miss event: it can cause read commands to be replicated to the
+ * replica/AOF. This test is here temporarily (for coverage, and to verify that
+ * it does not crash). */
+static int KeySpace_NotificationModuleKeyMiss(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+ REDISMODULE_NOT_USED(key);
+
+ int flags = RedisModule_GetContextFlags(ctx);
+ if (!(flags & REDISMODULE_CTX_FLAGS_MASTER)) {
+ return REDISMODULE_OK; // ignore the event on replica
+ }
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "incr", "!c", "missed");
+ RedisModule_FreeCallReply(rep);
+
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NotificationModuleString(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+ RedisModuleKey *redis_key = RedisModule_OpenKey(ctx, key, REDISMODULE_READ);
+
+ size_t len = 0;
+    /* RedisModule_StringDMA could change the data format and cause the old robj to be freed.
+     * This code verifies that such a format change will not cause any crashes. */
+ char *data = RedisModule_StringDMA(redis_key, &len, REDISMODULE_READ);
+ int res = strncmp(data, "dummy", 5);
+ REDISMODULE_NOT_USED(res);
+
+ RedisModule_CloseKey(redis_key);
+
+ return REDISMODULE_OK;
+}
+
+static void KeySpace_PostNotificationStringFreePD(void *pd) {
+ RedisModule_FreeString(NULL, pd);
+}
+
+static void KeySpace_PostNotificationString(RedisModuleCtx *ctx, void *pd) {
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "incr", "!s", pd);
+ RedisModule_FreeCallReply(rep);
+}
+
+static int KeySpace_NotificationModuleStringPostNotificationJob(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char *key_str = RedisModule_StringPtrLen(key, NULL);
+
+ if (strncmp(key_str, "string1_", 8) != 0) {
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleString *new_key = RedisModule_CreateStringPrintf(NULL, "string_changed{%s}", key_str);
+ RedisModule_AddPostNotificationJob(ctx, KeySpace_PostNotificationString, new_key, KeySpace_PostNotificationStringFreePD);
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NotificationModule(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char* keyName = RedisModule_StringPtrLen(key, NULL);
+ int nokey;
+ RedisModule_DictGetC(module_event_log, (void*)keyName, strlen(keyName), &nokey);
+ if(nokey){
+ RedisModule_DictSetC(module_event_log, (void*)keyName, strlen(keyName), RedisModule_HoldString(ctx, key));
+ }
+ return REDISMODULE_OK;
+}
+
+static int cmdNotify(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ RedisModule_NotifyKeyspaceEvent(ctx, REDISMODULE_NOTIFY_MODULE, "notify", argv[1]);
+ RedisModule_ReplyWithNull(ctx);
+ return REDISMODULE_OK;
+}
+
+static int cmdIsModuleKeyNotified(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* key = RedisModule_StringPtrLen(argv[1], NULL);
+
+ int nokey;
+ RedisModuleString* keyStr = RedisModule_DictGetC(module_event_log, (void*)key, strlen(key), &nokey);
+
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithLongLong(ctx, !nokey);
+ if(nokey){
+ RedisModule_ReplyWithNull(ctx);
+ }else{
+ RedisModule_ReplyWithString(ctx, keyStr);
+ }
+ return REDISMODULE_OK;
+}
+
+static int cmdIsKeyLoaded(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc != 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* key = RedisModule_StringPtrLen(argv[1], NULL);
+
+ int nokey;
+ RedisModuleString* keyStr = RedisModule_DictGetC(loaded_event_log, (void*)key, strlen(key), &nokey);
+
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithLongLong(ctx, !nokey);
+ if(nokey){
+ RedisModule_ReplyWithNull(ctx);
+ }else{
+ RedisModule_ReplyWithString(ctx, keyStr);
+ }
+ return REDISMODULE_OK;
+}
+
+static int cmdDelKeyCopy(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ cached_time = RedisModule_CachedMicroseconds();
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "DEL", "s!", argv[1]);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ cached_time = 0;
+ return REDISMODULE_OK;
+}
+
+/* Call INCR and propagate using RM_Call with `!`. */
+static int cmdIncrCase1(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "s!", argv[1]);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ return REDISMODULE_OK;
+}
+
+/* Call INCR and propagate using RM_Replicate. */
+static int cmdIncrCase2(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "s", argv[1]);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ RedisModule_Replicate(ctx, "INCR", "s", argv[1]);
+ return REDISMODULE_OK;
+}
+
+/* Call INCR and propagate using RM_ReplicateVerbatim. */
+static int cmdIncrCase3(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "s", argv[1]);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
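+
+/* Together, the three cases above cover the propagation options: case 1
+ * replicates the effect of the RM_Call itself via the '!' flag, case 2
+ * replicates an explicit INCR via RM_Replicate, and case 3 replicates the
+ * module command verbatim via RM_ReplicateVerbatim. */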
+
+static int cmdIncrDels(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ dels++;
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+static int cmdGetDels(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ return RedisModule_ReplyWithLongLong(ctx, dels);
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (RedisModule_Init(ctx,"testkeyspace",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ loaded_event_log = RedisModule_CreateDict(ctx);
+ module_event_log = RedisModule_CreateDict(ctx);
+
+ int keySpaceAll = RedisModule_GetKeyspaceNotificationFlagsAll();
+
+ if (!(keySpaceAll & REDISMODULE_NOTIFY_LOADED)) {
+ // REDISMODULE_NOTIFY_LOADED events are not supported, we cannot start
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_LOADED, KeySpace_NotificationLoaded) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_GENERIC, KeySpace_NotificationGeneric) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_EXPIRED, KeySpace_NotificationExpired) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_MODULE, KeySpace_NotificationModule) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_KEY_MISS, KeySpace_NotificationModuleKeyMiss) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_STRING, KeySpace_NotificationModuleString) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_STRING, KeySpace_NotificationModuleStringPostNotificationJob) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx,"keyspace.notify", cmdNotify,"",0,0,0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx,"keyspace.is_module_key_notified", cmdIsModuleKeyNotified,"",0,0,0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx,"keyspace.is_key_loaded", cmdIsKeyLoaded,"",0,0,0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "keyspace.del_key_copy", cmdDelKeyCopy,
+ "write", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "keyspace.incr_case1", cmdIncrCase1,
+ "write", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "keyspace.incr_case2", cmdIncrCase2,
+ "write", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "keyspace.incr_case3", cmdIncrCase3,
+ "write", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "keyspace.incr_dels", cmdIncrDels,
+ "write", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx, "keyspace.get_dels", cmdGetDels,
+ "readonly", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (argc == 1) {
+ const char *ptr = RedisModule_StringPtrLen(argv[0], NULL);
+ if (!strcasecmp(ptr, "noload")) {
+ /* The "noload" argument is a hint that we should return ERR at the last moment of OnLoad. */
+ RedisModule_FreeDict(ctx, loaded_event_log);
+ RedisModule_FreeDict(ctx, module_event_log);
+ return REDISMODULE_ERR;
+ }
+ }
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(loaded_event_log, "^", NULL, 0);
+ char* key;
+ size_t keyLen;
+ RedisModuleString* val;
+ while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){
+ RedisModule_FreeString(ctx, val);
+ }
+ RedisModule_DictIteratorStop(iter); /* stop the iterator before freeing the dict it walks */
+ RedisModule_FreeDict(ctx, loaded_event_log);
+ loaded_event_log = NULL;
+
+ iter = RedisModule_DictIteratorStartC(module_event_log, "^", NULL, 0);
+ while((key = RedisModule_DictNextC(iter, &keyLen, (void**)&val))){
+ RedisModule_FreeString(ctx, val);
+ }
+ RedisModule_DictIteratorStop(iter);
+ RedisModule_FreeDict(ctx, module_event_log);
+ module_event_log = NULL;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/keyspecs.c b/tests/modules/keyspecs.c
new file mode 100644
index 0000000..0a70de8
--- /dev/null
+++ b/tests/modules/keyspecs.c
@@ -0,0 +1,236 @@
+#include "redismodule.h"
+
+#define UNUSED(V) ((void) V)
+
+/* This function implements all commands in this module. All we care about is
+ * the COMMAND metadata anyway. */
+int kspec_impl(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ /* Handle getkeys-api introspection (for "kspec.nonewithgetkeys") */
+ if (RedisModule_IsKeysPositionRequest(ctx)) {
+ for (int i = 1; i < argc; i += 2)
+ RedisModule_KeyAtPosWithFlags(ctx, i, REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS);
+
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
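+
+/* For the getkeys-api variant registered below, key positions are resolved at
+ * runtime by the branch above; e.g. (illustrative)
+ * "COMMAND GETKEYS kspec.nonewithgetkeys k1 v1 k2 v2" reports k1 and k2,
+ * the odd argument positions. */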
+
+int createKspecNone(RedisModuleCtx *ctx) {
+ /* A command with no keyspecs, only the legacy (first,last,step) triple (an MSET-like spec). */
+ if (RedisModule_CreateCommand(ctx,"kspec.none",kspec_impl,"",1,-1,2) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
+
+int createKspecNoneWithGetkeys(RedisModuleCtx *ctx) {
+ /* A command with no keyspecs, only the legacy (first,last,step) triple (an MSET-like spec), but with a getkeys callback as well */
+ if (RedisModule_CreateCommand(ctx,"kspec.nonewithgetkeys",kspec_impl,"getkeys-api",1,-1,2) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
+
+int createKspecTwoRanges(RedisModuleCtx *ctx) {
+ /* Test that two position/range-based key specs are combined to produce the
+ * legacy (first,last,step) values representing both keys. */
+ if (RedisModule_CreateCommand(ctx,"kspec.tworanges",kspec_impl,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.tworanges");
+ RedisModuleCommandInfo info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .arity = -2,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 2,
+ /* Omitted find_keys_type is shorthand for RANGE {0,1,0} */
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
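+
+/* Presumably the two specs above (single keys at positions 1 and 2) collapse
+ * into the legacy triple (1,2,1). */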
+
+int createKspecTwoRangesWithGap(RedisModuleCtx *ctx) {
+ /* Test that two position/range-based key specs are combined to produce the
+ * legacy (first,last,step) values representing just one key. */
+ if (RedisModule_CreateCommand(ctx,"kspec.tworangeswithgap",kspec_impl,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.tworangeswithgap");
+ RedisModuleCommandInfo info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .arity = -2,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 3,
+ /* Omitted find_keys_type is shorthand for RANGE {0,1,0} */
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+int createKspecKeyword(RedisModuleCtx *ctx) {
+ /* Only keyword-based specs. The legacy triple is wiped and set to (0,0,0). */
+ if (RedisModule_CreateCommand(ctx,"kspec.keyword",kspec_impl,"",3,-1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.keyword");
+ RedisModuleCommandInfo info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD,
+ .bs.keyword.keyword = "KEYS",
+ .bs.keyword.startfrom = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {-1,1,0}
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
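+
+/* Illustrative call: "kspec.keyword v1 v2 KEYS k1 k2". The spec searches for
+ * the KEYS keyword starting at argument 1, then takes every argument through
+ * the last one (lastkey -1) as a key, i.e. k1 and k2. */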
+
+int createKspecComplex1(RedisModuleCtx *ctx) {
+ /* The first spec is a range covering a single key. The rest are keyword-based specs. */
+ if (RedisModule_CreateCommand(ctx,"kspec.complex1",kspec_impl,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.complex1");
+ RedisModuleCommandInfo info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RO,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD,
+ .bs.keyword.keyword = "STORE",
+ .bs.keyword.startfrom = 2,
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD,
+ .bs.keyword.keyword = "KEYS",
+ .bs.keyword.startfrom = 2,
+ .find_keys_type = REDISMODULE_KSPEC_FK_KEYNUM,
+ .fk.keynum = {0,1,1}
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+int createKspecComplex2(RedisModuleCtx *ctx) {
+ /* The first spec is not legacy-compatible, and there are more than STATIC_KEYS_SPECS_NUM specs */
+ if (RedisModule_CreateCommand(ctx,"kspec.complex2",kspec_impl,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.complex2");
+ RedisModuleCommandInfo info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD,
+ .bs.keyword.keyword = "STORE",
+ .bs.keyword.startfrom = 5,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 2,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 3,
+ .find_keys_type = REDISMODULE_KSPEC_FK_KEYNUM,
+ .fk.keynum = {0,1,1}
+ },
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD,
+ .bs.keyword.keyword = "MOREKEYS",
+ .bs.keyword.startfrom = 5,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {-1,1,0}
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "keyspecs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (createKspecNone(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ if (createKspecNoneWithGetkeys(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ if (createKspecTwoRanges(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ if (createKspecTwoRangesWithGap(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ if (createKspecKeyword(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ if (createKspecComplex1(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ if (createKspecComplex2(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/list.c b/tests/modules/list.c
new file mode 100644
index 0000000..401b2d8
--- /dev/null
+++ b/tests/modules/list.c
@@ -0,0 +1,252 @@
+#include "redismodule.h"
+#include <assert.h>
+#include <errno.h>
+#include <strings.h>
+
+/* LIST.GETALL key [REVERSE] */
+int list_getall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 2 || argc > 3) return RedisModule_WrongArity(ctx);
+ int reverse = (argc == 3 &&
+ !strcasecmp(RedisModule_StringPtrLen(argv[2], NULL),
+ "REVERSE"));
+ RedisModule_AutoMemory(ctx);
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ if (RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_LIST) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+ long n = RedisModule_ValueLength(key);
+ RedisModule_ReplyWithArray(ctx, n);
+ if (!reverse) {
+ for (long i = 0; i < n; i++) {
+ RedisModuleString *elem = RedisModule_ListGet(key, i);
+ RedisModule_ReplyWithString(ctx, elem);
+ RedisModule_FreeString(ctx, elem);
+ }
+ } else {
+ for (long i = -1; i >= -n; i--) {
+ RedisModuleString *elem = RedisModule_ListGet(key, i);
+ RedisModule_ReplyWithString(ctx, elem);
+ RedisModule_FreeString(ctx, elem);
+ }
+ }
+
+ /* Test error condition: index out of bounds */
+ assert(RedisModule_ListGet(key, n) == NULL);
+ assert(errno == EDOM); /* no more elements in list */
+
+ /* RedisModule_CloseKey(key); //implicit, done by auto memory */
+ return REDISMODULE_OK;
+}
+
+/* LIST.EDIT key [REVERSE] cmdstr [value ..]
+ *
+ * cmdstr is a string of the following characters:
+ *
+ * k -- keep
+ * d -- delete
+ * i -- insert value from args
+ * r -- replace with value from args
+ *
+ * The number of occurrences of "i" and "r" in cmdstr should correspond to the
+ * number of args after cmdstr.
+ *
+ * Reply with a RESP3 Map, containing the number of edits (inserts, replaces, deletes)
+ * performed, as well as the last index and the entry it points to.
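+ *
+ * Illustrative example: after RPUSH mylist a b c, "LIST.EDIT mylist krd x"
+ * keeps "a", replaces "b" with "x" and then deletes "c", leaving (a, x).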
+ */
+int list_edit(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 3) return RedisModule_WrongArity(ctx);
+ RedisModule_AutoMemory(ctx);
+ int argpos = 1; /* the next arg */
+
+ /* key */
+ int keymode = REDISMODULE_READ | REDISMODULE_WRITE;
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[argpos++], keymode);
+ if (RedisModule_KeyType(key) != REDISMODULE_KEYTYPE_LIST) {
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ /* REVERSE */
+ int reverse = 0;
+ if (argc >= 4 &&
+ !strcasecmp(RedisModule_StringPtrLen(argv[argpos], NULL), "REVERSE")) {
+ reverse = 1;
+ argpos++;
+ }
+
+ /* cmdstr */
+ size_t cmdstr_len;
+ const char *cmdstr = RedisModule_StringPtrLen(argv[argpos++], &cmdstr_len);
+
+ /* validate cmdstr vs. argc */
+ long num_req_args = 0;
+ long min_list_length = 0;
+ for (size_t cmdpos = 0; cmdpos < cmdstr_len; cmdpos++) {
+ char c = cmdstr[cmdpos];
+ if (c == 'i' || c == 'r') num_req_args++;
+ if (c == 'd' || c == 'r' || c == 'k') min_list_length++;
+ }
+ if (argc < argpos + num_req_args) {
+ return RedisModule_ReplyWithError(ctx, "ERR too few args");
+ }
+ if ((long)RedisModule_ValueLength(key) < min_list_length) {
+ return RedisModule_ReplyWithError(ctx, "ERR list too short");
+ }
+
+ /* Iterate over the chars in cmdstr (edit instructions) */
+ long long num_inserts = 0, num_deletes = 0, num_replaces = 0;
+ long index = reverse ? -1 : 0;
+ RedisModuleString *value;
+
+ for (size_t cmdpos = 0; cmdpos < cmdstr_len; cmdpos++) {
+ switch (cmdstr[cmdpos]) {
+ case 'i': /* insert */
+ value = argv[argpos++];
+ assert(RedisModule_ListInsert(key, index, value) == REDISMODULE_OK);
+ index += reverse ? -1 : 1;
+ num_inserts++;
+ break;
+ case 'd': /* delete */
+ assert(RedisModule_ListDelete(key, index) == REDISMODULE_OK);
+ num_deletes++;
+ break;
+ case 'r': /* replace */
+ value = argv[argpos++];
+ assert(RedisModule_ListSet(key, index, value) == REDISMODULE_OK);
+ index += reverse ? -1 : 1;
+ num_replaces++;
+ break;
+ case 'k': /* keep */
+ index += reverse ? -1 : 1;
+ break;
+ }
+ }
+
+ RedisModuleString *v = RedisModule_ListGet(key, index);
+ RedisModule_ReplyWithMap(ctx, v ? 5 : 4);
+ RedisModule_ReplyWithCString(ctx, "i");
+ RedisModule_ReplyWithLongLong(ctx, num_inserts);
+ RedisModule_ReplyWithCString(ctx, "d");
+ RedisModule_ReplyWithLongLong(ctx, num_deletes);
+ RedisModule_ReplyWithCString(ctx, "r");
+ RedisModule_ReplyWithLongLong(ctx, num_replaces);
+ RedisModule_ReplyWithCString(ctx, "index");
+ RedisModule_ReplyWithLongLong(ctx, index);
+ if (v) {
+ RedisModule_ReplyWithCString(ctx, "entry");
+ RedisModule_ReplyWithString(ctx, v);
+ RedisModule_FreeString(ctx, v);
+ }
+
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* Reply based on errno as set by the List API functions. */
+static int replyByErrno(RedisModuleCtx *ctx) {
+ switch (errno) {
+ case EDOM:
+ return RedisModule_ReplyWithError(ctx, "ERR index out of bounds");
+ case ENOTSUP:
+ return RedisModule_ReplyWithError(ctx, REDISMODULE_ERRORMSG_WRONGTYPE);
+ default: assert(0); /* Can't happen */
+ }
+}
+
+/* LIST.GET key index */
+int list_get(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ long long index;
+ if (RedisModule_StringToLongLong(argv[2], &index) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "ERR index must be a number");
+ }
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ RedisModuleString *value = RedisModule_ListGet(key, index);
+ if (value) {
+ RedisModule_ReplyWithString(ctx, value);
+ RedisModule_FreeString(ctx, value);
+ } else {
+ replyByErrno(ctx);
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* LIST.SET key index value */
+int list_set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) return RedisModule_WrongArity(ctx);
+ long long index;
+ if (RedisModule_StringToLongLong(argv[2], &index) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "ERR index must be a number");
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ if (RedisModule_ListSet(key, index, argv[3]) == REDISMODULE_OK) {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ } else {
+ replyByErrno(ctx);
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* LIST.INSERT key index value
+ *
+ * If index is negative, value is inserted after, otherwise before the element
+ * at index.
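+ *
+ * Illustrative: index 0 inserts before the head (prepend), while index -1
+ * inserts after the tail (append).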
+ */
+int list_insert(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) return RedisModule_WrongArity(ctx);
+ long long index;
+ if (RedisModule_StringToLongLong(argv[2], &index) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "ERR index must be a number");
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ if (RedisModule_ListInsert(key, index, argv[3]) == REDISMODULE_OK) {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ } else {
+ replyByErrno(ctx);
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* LIST.DELETE key index */
+int list_delete(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ long long index;
+ if (RedisModule_StringToLongLong(argv[2], &index) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "ERR index must be a number");
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ if (RedisModule_ListDelete(key, index) == REDISMODULE_OK) {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ } else {
+ replyByErrno(ctx);
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "list", 1, REDISMODULE_APIVER_1) == REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "list.getall", list_getall, "",
+ 1, 1, 1) == REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "list.edit", list_edit, "write",
+ 1, 1, 1) == REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "list.get", list_get, "write",
+ 1, 1, 1) == REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "list.set", list_set, "write",
+ 1, 1, 1) == REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "list.insert", list_insert, "write",
+ 1, 1, 1) == REDISMODULE_OK &&
+ RedisModule_CreateCommand(ctx, "list.delete", list_delete, "write",
+ 1, 1, 1) == REDISMODULE_OK) {
+ return REDISMODULE_OK;
+ } else {
+ return REDISMODULE_ERR;
+ }
+}
diff --git a/tests/modules/mallocsize.c b/tests/modules/mallocsize.c
new file mode 100644
index 0000000..a1d31c1
--- /dev/null
+++ b/tests/modules/mallocsize.c
@@ -0,0 +1,237 @@
+#include "redismodule.h"
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+
+#define UNUSED(V) ((void) V)
+
+/* Registered type */
+RedisModuleType *mallocsize_type = NULL;
+
+typedef enum {
+ UDT_RAW,
+ UDT_STRING,
+ UDT_DICT
+} udt_type_t;
+
+typedef struct {
+ void *ptr;
+ size_t len;
+} raw_t;
+
+typedef struct {
+ udt_type_t type;
+ union {
+ raw_t raw;
+ RedisModuleString *str;
+ RedisModuleDict *dict;
+ } data;
+} udt_t;
+
+void udt_free(void *value) {
+ udt_t *udt = value;
+ switch (udt->type) {
+ case (UDT_RAW): {
+ RedisModule_Free(udt->data.raw.ptr);
+ break;
+ }
+ case (UDT_STRING): {
+ RedisModule_FreeString(NULL, udt->data.str);
+ break;
+ }
+ case (UDT_DICT): {
+ RedisModuleString *dk, *dv;
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(udt->data.dict, "^", NULL, 0);
+ while((dk = RedisModule_DictNext(NULL, iter, (void **)&dv)) != NULL) {
+ RedisModule_FreeString(NULL, dk);
+ RedisModule_FreeString(NULL, dv);
+ }
+ RedisModule_DictIteratorStop(iter);
+ RedisModule_FreeDict(NULL, udt->data.dict);
+ break;
+ }
+ }
+ RedisModule_Free(udt);
+}
+
+void udt_rdb_save(RedisModuleIO *rdb, void *value) {
+ udt_t *udt = value;
+ RedisModule_SaveUnsigned(rdb, udt->type);
+ switch (udt->type) {
+ case (UDT_RAW): {
+ RedisModule_SaveStringBuffer(rdb, udt->data.raw.ptr, udt->data.raw.len);
+ break;
+ }
+ case (UDT_STRING): {
+ RedisModule_SaveString(rdb, udt->data.str);
+ break;
+ }
+ case (UDT_DICT): {
+ RedisModule_SaveUnsigned(rdb, RedisModule_DictSize(udt->data.dict));
+ RedisModuleString *dk, *dv;
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(udt->data.dict, "^", NULL, 0);
+ while((dk = RedisModule_DictNext(NULL, iter, (void **)&dv)) != NULL) {
+ RedisModule_SaveString(rdb, dk);
+ RedisModule_SaveString(rdb, dv);
+ RedisModule_FreeString(NULL, dk); /* Allocated by RedisModule_DictNext */
+ }
+ RedisModule_DictIteratorStop(iter);
+ break;
+ }
+ }
+}
+
+void *udt_rdb_load(RedisModuleIO *rdb, int encver) {
+ if (encver != 0)
+ return NULL;
+ udt_t *udt = RedisModule_Alloc(sizeof(*udt));
+ udt->type = RedisModule_LoadUnsigned(rdb);
+ switch (udt->type) {
+ case (UDT_RAW): {
+ udt->data.raw.ptr = RedisModule_LoadStringBuffer(rdb, &udt->data.raw.len);
+ break;
+ }
+ case (UDT_STRING): {
+ udt->data.str = RedisModule_LoadString(rdb);
+ break;
+ }
+ case (UDT_DICT): {
+ long long dict_len = RedisModule_LoadUnsigned(rdb);
+ udt->data.dict = RedisModule_CreateDict(NULL);
+ for (int i = 0; i < dict_len; i++) { /* one key/value pair per dict entry, matching udt_rdb_save */
+ RedisModuleString *key = RedisModule_LoadString(rdb);
+ RedisModuleString *val = RedisModule_LoadString(rdb);
+ RedisModule_DictSet(udt->data.dict, key, val);
+ }
+ break;
+ }
+ }
+
+ return udt;
+}
+
+size_t udt_mem_usage(RedisModuleKeyOptCtx *ctx, const void *value, size_t sample_size) {
+ UNUSED(ctx);
+ UNUSED(sample_size);
+
+ const udt_t *udt = value;
+ size_t size = sizeof(*udt);
+
+ switch (udt->type) {
+ case (UDT_RAW): {
+ size += RedisModule_MallocSize(udt->data.raw.ptr);
+ break;
+ }
+ case (UDT_STRING): {
+ size += RedisModule_MallocSizeString(udt->data.str);
+ break;
+ }
+ case (UDT_DICT): {
+ void *dk;
+ size_t keylen;
+ RedisModuleString *dv;
+ RedisModuleDictIter *iter = RedisModule_DictIteratorStartC(udt->data.dict, "^", NULL, 0);
+ while((dk = RedisModule_DictNextC(iter, &keylen, (void **)&dv)) != NULL) {
+ size += keylen;
+ size += RedisModule_MallocSizeString(dv);
+ }
+ RedisModule_DictIteratorStop(iter);
+ break;
+ }
+ }
+
+ return size;
+}
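+
+/* This callback backs MEMORY USAGE for keys of this type; e.g. (illustrative)
+ * after MALLOCSIZE.SETRAW k 100, MEMORY USAGE k reflects the 100 allocated
+ * bytes plus the udt_t overhead. */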
+
+/* MALLOCSIZE.SETRAW key len */
+int cmd_setraw(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+
+ udt_t *udt = RedisModule_Alloc(sizeof(*udt));
+ udt->type = UDT_RAW;
+
+ long long raw_len;
+ RedisModule_StringToLongLong(argv[2], &raw_len);
+ udt->data.raw.ptr = RedisModule_Alloc(raw_len);
+ udt->data.raw.len = raw_len;
+
+ RedisModule_ModuleTypeSetValue(key, mallocsize_type, udt);
+ RedisModule_CloseKey(key);
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+/* MALLOCSIZE.SETSTR key string */
+int cmd_setstr(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+
+ udt_t *udt = RedisModule_Alloc(sizeof(*udt));
+ udt->type = UDT_STRING;
+
+ udt->data.str = argv[2];
+ RedisModule_RetainString(ctx, argv[2]);
+
+ RedisModule_ModuleTypeSetValue(key, mallocsize_type, udt);
+ RedisModule_CloseKey(key);
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+/* MALLOCSIZE.SETDICT key field value [field value ...] */
+int cmd_setdict(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 4 || argc % 2)
+ return RedisModule_WrongArity(ctx);
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+
+ udt_t *udt = RedisModule_Alloc(sizeof(*udt));
+ udt->type = UDT_DICT;
+
+ udt->data.dict = RedisModule_CreateDict(ctx);
+ for (int i = 2; i < argc; i += 2) {
+ RedisModule_DictSet(udt->data.dict, argv[i], argv[i+1]);
+ /* No need to retain argv[i], it is copied as the rax key */
+ RedisModule_RetainString(ctx, argv[i+1]);
+ }
+
+ RedisModule_ModuleTypeSetValue(key, mallocsize_type, udt);
+ RedisModule_CloseKey(key);
+
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ if (RedisModule_Init(ctx,"mallocsize",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleTypeMethods tm = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = udt_rdb_load,
+ .rdb_save = udt_rdb_save,
+ .free = udt_free,
+ .mem_usage2 = udt_mem_usage,
+ };
+
+ mallocsize_type = RedisModule_CreateDataType(ctx, "allocsize", 0, &tm);
+ if (mallocsize_type == NULL)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "mallocsize.setraw", cmd_setraw, "", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "mallocsize.setstr", cmd_setstr, "", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "mallocsize.setdict", cmd_setdict, "", 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/misc.c b/tests/modules/misc.c
new file mode 100644
index 0000000..46bfcb1
--- /dev/null
+++ b/tests/modules/misc.c
@@ -0,0 +1,571 @@
+#include "redismodule.h"
+
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+#include <errno.h>
+#include <limits.h>
+
+#define UNUSED(x) (void)(x)
+
+static int n_events = 0;
+
+static int KeySpace_NotificationModuleKeyMissExpired(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ UNUSED(ctx);
+ UNUSED(type);
+ UNUSED(event);
+ UNUSED(key);
+ n_events++;
+ return REDISMODULE_OK;
+}
+
+int test_clear_n_events(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ n_events = 0;
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int test_get_n_events(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ RedisModule_ReplyWithLongLong(ctx, n_events);
+ return REDISMODULE_OK;
+}
+
+int test_open_key_no_effects(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc<2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ int supportedMode = RedisModule_GetOpenKeyModesAll();
+ if (!(supportedMode & REDISMODULE_READ) || !(supportedMode & REDISMODULE_OPEN_KEY_NOEFFECTS)) {
+ RedisModule_ReplyWithError(ctx, "OpenKey modes are not supported");
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ | REDISMODULE_OPEN_KEY_NOEFFECTS);
+ if (!key) {
+ RedisModule_ReplyWithError(ctx, "key not found");
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_CloseKey(key);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int test_call_generic(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc<2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ const char* cmdname = RedisModule_StringPtrLen(argv[1], NULL);
+ RedisModuleCallReply *reply = RedisModule_Call(ctx, cmdname, "v", argv+2, argc-2);
+ if (reply) {
+ RedisModule_ReplyWithCallReply(ctx, reply);
+ RedisModule_FreeCallReply(reply);
+ } else {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ }
+ return REDISMODULE_OK;
+}
+
+int test_call_info(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ RedisModuleCallReply *reply;
+ if (argc>1)
+ reply = RedisModule_Call(ctx, "info", "s", argv[1]);
+ else
+ reply = RedisModule_Call(ctx, "info", "");
+ if (reply) {
+ RedisModule_ReplyWithCallReply(ctx, reply);
+ RedisModule_FreeCallReply(reply);
+ } else {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ }
+ return REDISMODULE_OK;
+}
+
+int test_ld_conv(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ long double ld = 0.00000000000000001L;
+ const char *ldstr = "0.00000000000000001";
+ RedisModuleString *s1 = RedisModule_CreateStringFromLongDouble(ctx, ld, 1);
+ RedisModuleString *s2 =
+ RedisModule_CreateString(ctx, ldstr, strlen(ldstr));
+ if (RedisModule_StringCompare(s1, s2) != 0) {
+ char err[4096];
+ snprintf(err, 4096,
+ "Failed to convert long double to string ('%s' != '%s')",
+ RedisModule_StringPtrLen(s1, NULL),
+ RedisModule_StringPtrLen(s2, NULL));
+ RedisModule_ReplyWithError(ctx, err);
+ goto final;
+ }
+ long double ld2 = 0;
+ if (RedisModule_StringToLongDouble(s2, &ld2) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx,
+ "Failed to convert string to long double");
+ goto final;
+ }
+ if (ld2 != ld) {
+ char err[4096];
+ snprintf(err, 4096,
+ "Failed to convert string to long double (%.40Lf != %.40Lf)",
+ ld2,
+ ld);
+ RedisModule_ReplyWithError(ctx, err);
+ goto final;
+ }
+
+ /* Make sure we can't convert a string that has \0 in it */
+ char buf[4] = "123";
+ buf[1] = '\0';
+ RedisModuleString *s3 = RedisModule_CreateString(ctx, buf, 3);
+ long double ld3;
+ if (RedisModule_StringToLongDouble(s3, &ld3) == REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid string successfully converted to long double");
+ RedisModule_FreeString(ctx, s3);
+ goto final;
+ }
+ RedisModule_FreeString(ctx, s3);
+
+ RedisModule_ReplyWithLongDouble(ctx, ld2);
+final:
+ RedisModule_FreeString(ctx, s1);
+ RedisModule_FreeString(ctx, s2);
+ return REDISMODULE_OK;
+}
+
+int test_flushall(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModule_ResetDataset(1, 0);
+ RedisModule_ReplyWithCString(ctx, "Ok");
+ return REDISMODULE_OK;
+}
+
+int test_dbsize(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ long long ll = RedisModule_DbSize(ctx);
+ RedisModule_ReplyWithLongLong(ctx, ll);
+ return REDISMODULE_OK;
+}
+
+int test_randomkey(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleString *str = RedisModule_RandomKey(ctx);
+ RedisModule_ReplyWithString(ctx, str);
+ RedisModule_FreeString(ctx, str);
+ return REDISMODULE_OK;
+}
+
+int test_keyexists(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 2) return RedisModule_WrongArity(ctx);
+ RedisModuleString *key = argv[1];
+ int exists = RedisModule_KeyExists(ctx, key);
+ return RedisModule_ReplyWithBool(ctx, exists);
+}
+
+RedisModuleKey *open_key_or_reply(RedisModuleCtx *ctx, RedisModuleString *keyname, int mode) {
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, keyname, mode);
+ if (!key) {
+ RedisModule_ReplyWithError(ctx, "key not found");
+ return NULL;
+ }
+ return key;
+}
+
+int test_getlru(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc<2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = open_key_or_reply(ctx, argv[1], REDISMODULE_READ|REDISMODULE_OPEN_KEY_NOTOUCH);
+ mstime_t lru;
+ RedisModule_GetLRU(key, &lru);
+ RedisModule_ReplyWithLongLong(ctx, lru);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int test_setlru(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc<3) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = open_key_or_reply(ctx, argv[1], REDISMODULE_READ|REDISMODULE_OPEN_KEY_NOTOUCH);
+ mstime_t lru;
+ if (RedisModule_StringToLongLong(argv[2], &lru) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "invalid idle time");
+ return REDISMODULE_OK;
+ }
+ int was_set = RedisModule_SetLRU(key, lru)==REDISMODULE_OK;
+ RedisModule_ReplyWithLongLong(ctx, was_set);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int test_getlfu(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc<2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = open_key_or_reply(ctx, argv[1], REDISMODULE_READ|REDISMODULE_OPEN_KEY_NOTOUCH);
+ mstime_t lfu;
+ RedisModule_GetLFU(key, &lfu);
+ RedisModule_ReplyWithLongLong(ctx, lfu);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int test_setlfu(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc<3) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ RedisModuleKey *key = open_key_or_reply(ctx, argv[1], REDISMODULE_READ|REDISMODULE_OPEN_KEY_NOTOUCH);
+ mstime_t lfu;
+ if (RedisModule_StringToLongLong(argv[2], &lfu) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "invalid freq");
+ return REDISMODULE_OK;
+ }
+ int was_set = RedisModule_SetLFU(key, lfu)==REDISMODULE_OK;
+ RedisModule_ReplyWithLongLong(ctx, was_set);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int test_redisversion(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ (void) argv;
+ (void) argc;
+
+ int version = RedisModule_GetServerVersion();
+ int patch = version & 0x000000ff;
+ int minor = (version & 0x0000ff00) >> 8;
+ int major = (version & 0x00ff0000) >> 16;
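+ /* For example, Redis 7.2.4 is encoded as 0x00070204. */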
+
+ RedisModuleString* vStr = RedisModule_CreateStringPrintf(ctx, "%d.%d.%d", major, minor, patch);
+ RedisModule_ReplyWithString(ctx, vStr);
+ RedisModule_FreeString(ctx, vStr);
+
+ return REDISMODULE_OK;
+}
+
+int test_getclientcert(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ (void) argv;
+ (void) argc;
+
+ RedisModuleString *cert = RedisModule_GetClientCertificate(ctx,
+ RedisModule_GetClientId(ctx));
+ if (!cert) {
+ RedisModule_ReplyWithNull(ctx);
+ } else {
+ RedisModule_ReplyWithString(ctx, cert);
+ RedisModule_FreeString(ctx, cert);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int test_clientinfo(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ (void) argv;
+ (void) argc;
+
+ RedisModuleClientInfoV1 ci = REDISMODULE_CLIENTINFO_INITIALIZER_V1;
+ uint64_t client_id = RedisModule_GetClientId(ctx);
+
+ /* Check expected result from the V1 initializer. */
+ assert(ci.version == 1);
+ /* Trying to populate a future version of the struct should fail. */
+ ci.version = REDISMODULE_CLIENTINFO_VERSION + 1;
+ assert(RedisModule_GetClientInfoById(&ci, client_id) == REDISMODULE_ERR);
+
+ ci.version = 1;
+ if (RedisModule_GetClientInfoById(&ci, client_id) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx, "failed to get client info");
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithArray(ctx, 10);
+ char flags[512];
+ snprintf(flags, sizeof(flags) - 1, "%s:%s:%s:%s:%s:%s",
+ ci.flags & REDISMODULE_CLIENTINFO_FLAG_SSL ? "ssl" : "",
+ ci.flags & REDISMODULE_CLIENTINFO_FLAG_PUBSUB ? "pubsub" : "",
+ ci.flags & REDISMODULE_CLIENTINFO_FLAG_BLOCKED ? "blocked" : "",
+ ci.flags & REDISMODULE_CLIENTINFO_FLAG_TRACKING ? "tracking" : "",
+ ci.flags & REDISMODULE_CLIENTINFO_FLAG_UNIXSOCKET ? "unixsocket" : "",
+ ci.flags & REDISMODULE_CLIENTINFO_FLAG_MULTI ? "multi" : "");
+
+ RedisModule_ReplyWithCString(ctx, "flags");
+ RedisModule_ReplyWithCString(ctx, flags);
+ RedisModule_ReplyWithCString(ctx, "id");
+ RedisModule_ReplyWithLongLong(ctx, ci.id);
+ RedisModule_ReplyWithCString(ctx, "addr");
+ RedisModule_ReplyWithCString(ctx, ci.addr);
+ RedisModule_ReplyWithCString(ctx, "port");
+ RedisModule_ReplyWithLongLong(ctx, ci.port);
+ RedisModule_ReplyWithCString(ctx, "db");
+ RedisModule_ReplyWithLongLong(ctx, ci.db);
+
+ return REDISMODULE_OK;
+}
+
+int test_getname(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ (void)argv;
+ if (argc != 1) return RedisModule_WrongArity(ctx);
+ unsigned long long id = RedisModule_GetClientId(ctx);
+ RedisModuleString *name = RedisModule_GetClientNameById(ctx, id);
+ if (name == NULL)
+ return RedisModule_ReplyWithError(ctx, "-ERR No name");
+ RedisModule_ReplyWithString(ctx, name);
+ RedisModule_FreeString(ctx, name);
+ return REDISMODULE_OK;
+}
+
+int test_setname(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+ unsigned long long id = RedisModule_GetClientId(ctx);
+ if (RedisModule_SetClientNameById(id, argv[1]) == REDISMODULE_OK)
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+ else
+ return RedisModule_ReplyWithError(ctx, strerror(errno));
+}
+
+int test_log_tsctx(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ RedisModuleCtx *tsctx = RedisModule_GetDetachedThreadSafeContext(ctx);
+
+ if (argc != 3) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ char level[50];
+ size_t level_len;
+ const char *level_str = RedisModule_StringPtrLen(argv[1], &level_len);
+ snprintf(level, sizeof(level) - 1, "%.*s", (int) level_len, level_str);
+
+ size_t msg_len;
+ const char *msg_str = RedisModule_StringPtrLen(argv[2], &msg_len);
+
+ RedisModule_Log(tsctx, level, "%.*s", (int) msg_len, msg_str);
+ RedisModule_FreeThreadSafeContext(tsctx);
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int test_weird_cmd(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int test_monotonic_time(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModule_ReplyWithLongLong(ctx, RedisModule_MonotonicMicroseconds());
+ return REDISMODULE_OK;
+}
+
+/* wrapper for RM_Call */
+int test_rm_call(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc < 2){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char* cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, "Ev", argv + 2, argc - 2);
+ if(!rep){
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ }else{
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ return REDISMODULE_OK;
+}
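+
+/* In the "Ev" format used above, 'v' passes the remaining arguments as a
+ * vector of RedisModuleStrings, and 'E' makes RM_Call return errors as call
+ * replies rather than only setting errno. */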
+
+/* wrapper for RM_Call which also replicates the module command */
+int test_rm_call_replicate(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ test_rm_call(ctx, argv, argc);
+ RedisModule_ReplicateVerbatim(ctx);
+
+ return REDISMODULE_OK;
+}
+
+/* wrapper for RM_Call with flags */
+int test_rm_call_flags(RedisModuleCtx *ctx, RedisModuleString **argv, int argc){
+ if(argc < 3){
+ return RedisModule_WrongArity(ctx);
+ }
+
+ /* Append Ev to the provided flags. */
+ RedisModuleString *flags = RedisModule_CreateStringFromString(ctx, argv[1]);
+ RedisModule_StringAppendBuffer(ctx, flags, "Ev", 2);
+
+ const char* flg = RedisModule_StringPtrLen(flags, NULL);
+ const char* cmd = RedisModule_StringPtrLen(argv[2], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, flg, argv + 3, argc - 3);
+ if(!rep){
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ }else{
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ RedisModule_FreeString(ctx, flags);
+
+ return REDISMODULE_OK;
+}
+
+int test_ull_conv(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ unsigned long long ull = 18446744073709551615ULL;
+ const char *ullstr = "18446744073709551615";
+
+ RedisModuleString *s1 = RedisModule_CreateStringFromULongLong(ctx, ull);
+ RedisModuleString *s2 =
+ RedisModule_CreateString(ctx, ullstr, strlen(ullstr));
+ if (RedisModule_StringCompare(s1, s2) != 0) {
+ char err[4096];
+ snprintf(err, 4096,
+ "Failed to convert unsigned long long to string ('%s' != '%s')",
+ RedisModule_StringPtrLen(s1, NULL),
+ RedisModule_StringPtrLen(s2, NULL));
+ RedisModule_ReplyWithError(ctx, err);
+ goto final;
+ }
+ unsigned long long ull2 = 0;
+ if (RedisModule_StringToULongLong(s2, &ull2) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx,
+ "Failed to convert string to unsigned long long");
+ goto final;
+ }
+ if (ull2 != ull) {
+ char err[4096];
+ snprintf(err, 4096,
+ "Failed to convert string to unsigned long long (%llu != %llu)",
+ ull2,
+ ull);
+ RedisModule_ReplyWithError(ctx, err);
+ goto final;
+ }
+
+ /* Make sure we can't convert a string that is greater than ULLONG_MAX or less than 0 */
+ ullstr = "18446744073709551616";
+ RedisModuleString *s3 = RedisModule_CreateString(ctx, ullstr, strlen(ullstr));
+ unsigned long long ull3;
+ if (RedisModule_StringToULongLong(s3, &ull3) == REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid string successfully converted to unsigned long long");
+ RedisModule_FreeString(ctx, s3);
+ goto final;
+ }
+ RedisModule_FreeString(ctx, s3);
+ ullstr = "-1";
+ RedisModuleString *s4 = RedisModule_CreateString(ctx, ullstr, strlen(ullstr));
+ unsigned long long ull4;
+ if (RedisModule_StringToULongLong(s4, &ull4) == REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid string successfully converted to unsigned long long");
+ RedisModule_FreeString(ctx, s4);
+ goto final;
+ }
+ RedisModule_FreeString(ctx, s4);
+
+ RedisModule_ReplyWithSimpleString(ctx, "ok");
+
+final:
+ RedisModule_FreeString(ctx, s1);
+ RedisModule_FreeString(ctx, s2);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx,"misc",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_KEY_MISS | REDISMODULE_NOTIFY_EXPIRED, KeySpace_NotificationModuleKeyMissExpired) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if (RedisModule_CreateCommand(ctx,"test.call_generic", test_call_generic,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.call_info", test_call_info,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.ld_conversion", test_ld_conv, "",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.ull_conversion", test_ull_conv, "",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.flushall", test_flushall,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.dbsize", test_dbsize,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.randomkey", test_randomkey,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.keyexists", test_keyexists,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.setlru", test_setlru,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.getlru", test_getlru,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.setlfu", test_setlfu,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.getlfu", test_getlfu,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.clientinfo", test_clientinfo,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.getname", test_getname,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.setname", test_setname,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.redisversion", test_redisversion,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.getclientcert", test_getclientcert,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.log_tsctx", test_log_tsctx,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ /* Add a command with ':' in its name, so that we can check commandstats sanitization. */
+ if (RedisModule_CreateCommand(ctx,"test.weird:cmd", test_weird_cmd,"readonly",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.monotonic_time", test_monotonic_time,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "test.rm_call", test_rm_call,"allow-stale", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "test.rm_call_flags", test_rm_call_flags,"allow-stale", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "test.rm_call_replicate", test_rm_call_replicate,"allow-stale", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "test.silent_open_key", test_open_key_no_effects,"", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "test.get_n_events", test_get_n_events,"", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "test.clear_n_events", test_clear_n_events,"", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/moduleauthtwo.c b/tests/modules/moduleauthtwo.c
new file mode 100644
index 0000000..0a4f56b
--- /dev/null
+++ b/tests/modules/moduleauthtwo.c
@@ -0,0 +1,43 @@
+#include "redismodule.h"
+
+#include <string.h>
+
+/* This is a second sample module to validate that module authentication callbacks can be registered
+ * from multiple modules. */
+
+/* Non Blocking Module Auth callback / implementation. */
+int auth_cb(RedisModuleCtx *ctx, RedisModuleString *username, RedisModuleString *password, RedisModuleString **err) {
+ const char *user = RedisModule_StringPtrLen(username, NULL);
+ const char *pwd = RedisModule_StringPtrLen(password, NULL);
+ if (!strcmp(user,"foo") && !strcmp(pwd,"allow_two")) {
+ RedisModule_AuthenticateClientWithACLUser(ctx, "foo", 3, NULL, NULL, NULL);
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ else if (!strcmp(user,"foo") && !strcmp(pwd,"deny_two")) {
+ RedisModuleString *log = RedisModule_CreateString(ctx, "Module Auth", 11);
+ RedisModule_ACLAddLogEntryByUserName(ctx, username, log, REDISMODULE_ACL_LOG_AUTH);
+ RedisModule_FreeString(ctx, log);
+ const char *err_msg = "Auth denied by Misc Module.";
+ *err = RedisModule_CreateString(ctx, err_msg, strlen(err_msg));
+ return REDISMODULE_AUTH_HANDLED;
+ }
+ return REDISMODULE_AUTH_NOT_HANDLED;
+}
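+
+/* Illustrative flow: "AUTH foo allow_two" is handled here and binds the client
+ * to the ACL user "foo"; "AUTH foo deny_two" adds an ACL log entry and fails
+ * with the error above; any other credentials fall through to the next
+ * registered callback (REDISMODULE_AUTH_NOT_HANDLED). */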
+
+int test_rm_register_auth_cb(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModule_RegisterAuthCallback(ctx, auth_cb);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx,"moduleauthtwo",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"testmoduletwo.rm_register_auth_cb", test_rm_register_auth_cb,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+} \ No newline at end of file
diff --git a/tests/modules/moduleconfigs.c b/tests/modules/moduleconfigs.c
new file mode 100644
index 0000000..2c1737d
--- /dev/null
+++ b/tests/modules/moduleconfigs.c
@@ -0,0 +1,195 @@
+#include "redismodule.h"
+#include <strings.h>
+int mutable_bool_val;
+int immutable_bool_val;
+long long longval;
+long long memval;
+RedisModuleString *strval = NULL;
+int enumval;
+int flagsval;
+
+/* A series of get and set callbacks, one pair for each type of config. These
+ * rely on the privdata pointer pointing at the config variable, and they
+ * register the configs as such. Note that one could also just use the names,
+ * and store anything in privdata. */
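+
+/* Once registered, these configs are exposed through the standard CONFIG
+ * command under the module's name, e.g. (illustrative):
+ *   CONFIG SET moduleconfigs.mutable_bool yes
+ *   CONFIG GET moduleconfigs.numeric */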
+int getBoolConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ return (*(int *)privdata);
+}
+
+int setBoolConfigCommand(const char *name, int new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ *(int *)privdata = new;
+ return REDISMODULE_OK;
+}
+
+long long getNumericConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ return (*(long long *) privdata);
+}
+
+int setNumericConfigCommand(const char *name, long long new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ *(long long *)privdata = new;
+ return REDISMODULE_OK;
+}
+
+RedisModuleString *getStringConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(privdata);
+ return strval;
+}
+int setStringConfigCommand(const char *name, RedisModuleString *new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ REDISMODULE_NOT_USED(privdata);
+ size_t len;
+ if (!strcasecmp(RedisModule_StringPtrLen(new, &len), "rejectisfreed")) {
+ *err = RedisModule_CreateString(NULL, "Cannot set string to 'rejectisfreed'", 36);
+ return REDISMODULE_ERR;
+ }
+ if (strval) RedisModule_FreeString(NULL, strval);
+ RedisModule_RetainString(NULL, new);
+ strval = new;
+ return REDISMODULE_OK;
+}
+
+int getEnumConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(privdata);
+ return enumval;
+}
+
+int setEnumConfigCommand(const char *name, int val, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ REDISMODULE_NOT_USED(privdata);
+ enumval = val;
+ return REDISMODULE_OK;
+}
+
+int getFlagsConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(privdata);
+ return flagsval;
+}
+
+int setFlagsConfigCommand(const char *name, int val, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(name);
+ REDISMODULE_NOT_USED(err);
+ REDISMODULE_NOT_USED(privdata);
+ flagsval = val;
+ return REDISMODULE_OK;
+}
+
+int boolApplyFunc(RedisModuleCtx *ctx, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(privdata);
+ if (mutable_bool_val && immutable_bool_val) {
+ *err = RedisModule_CreateString(NULL, "Bool configs cannot both be yes.", 32);
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+int longlongApplyFunc(RedisModuleCtx *ctx, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(ctx);
+ REDISMODULE_NOT_USED(privdata);
+ if (longval == memval) {
+ *err = RedisModule_CreateString(NULL, "These configs cannot equal each other.", 38);
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
+
+int registerBlockCheck(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ int response_ok = 0;
+ int result = RedisModule_RegisterBoolConfig(ctx, "mutable_bool", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &mutable_bool_val);
+ response_ok |= (result == REDISMODULE_OK);
+
+ result = RedisModule_RegisterStringConfig(ctx, "string", "secret password", REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL);
+ response_ok |= (result == REDISMODULE_OK);
+
+ const char *enum_vals[] = {"none", "five", "one", "two", "four"};
+ const int int_vals[] = {0, 5, 1, 2, 4};
+ result = RedisModule_RegisterEnumConfig(ctx, "enum", 1, REDISMODULE_CONFIG_DEFAULT, enum_vals, int_vals, 5, getEnumConfigCommand, setEnumConfigCommand, NULL, NULL);
+ response_ok |= (result == REDISMODULE_OK);
+
+ result = RedisModule_RegisterNumericConfig(ctx, "numeric", -1, REDISMODULE_CONFIG_DEFAULT, -5, 2000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &longval);
+ response_ok |= (result == REDISMODULE_OK);
+
+ result = RedisModule_LoadConfigs(ctx);
+ response_ok |= (result == REDISMODULE_OK);
+
+ /* This validates that configs cannot be registered or loaded outside OnLoad;
+ * it therefore replies with an error if any of those calls succeeded. */
+ if (response_ok) {
+ RedisModule_ReplyWithError(ctx, "UNEXPECTEDOK");
+ } else {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ }
+ return REDISMODULE_OK;
+}
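+
+/* A hypothetical redis-cli session illustrating the expected behavior of the
+ * command above (illustrative only, not asserted by the module itself):
+ *
+ *   block.register.configs.outside.onload   -> OK
+ *
+ * i.e. every Register*Config/LoadConfigs call above fails outside OnLoad,
+ * so response_ok stays 0 and the command replies OK. */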
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "moduleconfigs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_RegisterBoolConfig(ctx, "mutable_bool", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &mutable_bool_val) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ /* Immutable config here. */
+ if (RedisModule_RegisterBoolConfig(ctx, "immutable_bool", 0, REDISMODULE_CONFIG_IMMUTABLE, getBoolConfigCommand, setBoolConfigCommand, boolApplyFunc, &immutable_bool_val) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_RegisterStringConfig(ctx, "string", "secret password", REDISMODULE_CONFIG_DEFAULT, getStringConfigCommand, setStringConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+
+ /* On the stack to make sure we're copying them. */
+ const char *enum_vals[] = {"none", "five", "one", "two", "four"};
+ const int int_vals[] = {0, 5, 1, 2, 4};
+
+ if (RedisModule_RegisterEnumConfig(ctx, "enum", 1, REDISMODULE_CONFIG_DEFAULT, enum_vals, int_vals, 5, getEnumConfigCommand, setEnumConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_RegisterEnumConfig(ctx, "flags", 3, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_BITFLAGS, enum_vals, int_vals, 5, getFlagsConfigCommand, setFlagsConfigCommand, NULL, NULL) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ /* Memory config here. */
+ if (RedisModule_RegisterNumericConfig(ctx, "memory_numeric", 1024, REDISMODULE_CONFIG_DEFAULT | REDISMODULE_CONFIG_MEMORY, 0, 3000000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &memval) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_RegisterNumericConfig(ctx, "numeric", -1, REDISMODULE_CONFIG_DEFAULT, -5, 2000, getNumericConfigCommand, setNumericConfigCommand, longlongApplyFunc, &longval) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ size_t len;
+ if (argc && !strcasecmp(RedisModule_StringPtrLen(argv[0], &len), "noload")) {
+ return REDISMODULE_OK;
+ } else if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
+ if (strval) {
+ RedisModule_FreeString(ctx, strval);
+ strval = NULL;
+ }
+ return REDISMODULE_ERR;
+ }
+ /* Creates a command which registers configs outside OnLoad() function. */
+ if (RedisModule_CreateCommand(ctx,"block.register.configs.outside.onload", registerBlockCheck, "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ if (strval) {
+ RedisModule_FreeString(ctx, strval);
+ strval = NULL;
+ }
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/moduleconfigstwo.c b/tests/modules/moduleconfigstwo.c
new file mode 100644
index 0000000..c0e8f91
--- /dev/null
+++ b/tests/modules/moduleconfigstwo.c
@@ -0,0 +1,39 @@
+#include "redismodule.h"
+#include <strings.h>
+
+/* A second module with configs, used for testing
+ * that multiple modules with configs don't interfere with each other. */
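+
+/* Since this module is named "configs", the config registered below is
+ * exposed as "configs.test". A hypothetical redis-cli session (illustrative
+ * only):
+ *
+ *   CONFIG GET configs.test      -> 1) "configs.test" 2) "yes"
+ *   CONFIG SET configs.test no   -> OK
+ */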
+int bool_config;
+
+int getBoolConfigCommand(const char *name, void *privdata) {
+ REDISMODULE_NOT_USED(privdata);
+ if (!strcasecmp(name, "test")) {
+ return bool_config;
+ }
+ return 0;
+}
+
+int setBoolConfigCommand(const char *name, int new, void *privdata, RedisModuleString **err) {
+ REDISMODULE_NOT_USED(privdata);
+ REDISMODULE_NOT_USED(err);
+ if (!strcasecmp(name, "test")) {
+ bool_config = new;
+ return REDISMODULE_OK;
+ }
+ return REDISMODULE_ERR;
+}
+
+/* No arguments are expected */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "configs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ if (RedisModule_RegisterBoolConfig(ctx, "test", 1, REDISMODULE_CONFIG_DEFAULT, getBoolConfigCommand, setBoolConfigCommand, NULL, &argc) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ if (RedisModule_LoadConfigs(ctx) == REDISMODULE_ERR) {
+ return REDISMODULE_ERR;
+ }
+ return REDISMODULE_OK;
+}
\ No newline at end of file
diff --git a/tests/modules/postnotifications.c b/tests/modules/postnotifications.c
new file mode 100644
index 0000000..b4a97cb
--- /dev/null
+++ b/tests/modules/postnotifications.c
@@ -0,0 +1,303 @@
+/* This module is used to test the server post keyspace jobs API.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2020, Meir Shpilraien <meir at redislabs dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This module verifies 'RedisModule_AddPostNotificationJob' by registering for 3
+ * keyspace events:
+ * * STRINGS - the module registers for all string notifications and sets a post
+ * notification job that increases a counter indicating how many times each
+ * string key was changed. In addition, it increases another counter that
+ * counts the total number of changes made on all string keys.
+ * * EXPIRED - the module registers for the expired event and sets a post
+ * notification job that counts the total number of expired events.
+ * * EVICTED - the module registers for the evicted event and sets a post
+ * notification job that counts the total number of evicted events.
+ *
+ * In addition, the module registers a new command, 'postnotification.async_set',
+ * which performs a SET command from a background thread. This allows checking
+ * 'RedisModule_AddPostNotificationJob' on notifications triggered on a
+ * background thread. */
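+
+/* A hypothetical session illustrating the STRINGS counters (illustrative
+ * only, not an assertion made by this file):
+ *
+ *   SET string_x 1                 (the post job INCRs string_changed{string_x},
+ *                                   whose change in turn INCRs string_total)
+ *   GET string_changed{string_x}   -> "1"
+ *   GET string_total               -> "1"
+ */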
+
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE /* For usleep */
+
+#include "redismodule.h"
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <pthread.h>
+
+static void KeySpace_PostNotificationStringFreePD(void *pd) {
+ RedisModule_FreeString(NULL, pd);
+}
+
+static void KeySpace_PostNotificationReadKey(RedisModuleCtx *ctx, void *pd) {
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "get", "!s", pd);
+ RedisModule_FreeCallReply(rep);
+}
+
+static void KeySpace_PostNotificationString(RedisModuleCtx *ctx, void *pd) {
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "incr", "!s", pd);
+ RedisModule_FreeCallReply(rep);
+}
+
+static int KeySpace_NotificationExpired(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+ REDISMODULE_NOT_USED(key);
+
+ RedisModuleString *new_key = RedisModule_CreateString(NULL, "expired", 7);
+ RedisModule_AddPostNotificationJob(ctx, KeySpace_PostNotificationString, new_key, KeySpace_PostNotificationStringFreePD);
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NotificationEvicted(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char *key_str = RedisModule_StringPtrLen(key, NULL);
+
+ if (strncmp(key_str, "evicted", 7) == 0) {
+ return REDISMODULE_OK; /* do not count the evicted key */
+ }
+
+ if (strncmp(key_str, "before_evicted", 14) == 0) {
+ return REDISMODULE_OK; /* do not count the before_evicted key */
+ }
+
+ RedisModuleString *new_key = RedisModule_CreateString(NULL, "evicted", 7);
+ RedisModule_AddPostNotificationJob(ctx, KeySpace_PostNotificationString, new_key, KeySpace_PostNotificationStringFreePD);
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NotificationString(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char *key_str = RedisModule_StringPtrLen(key, NULL);
+
+ if (strncmp(key_str, "string_", 7) != 0) {
+ return REDISMODULE_OK;
+ }
+
+ if (strcmp(key_str, "string_total") == 0) {
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleString *new_key;
+ if (strncmp(key_str, "string_changed{", 15) == 0) {
+ new_key = RedisModule_CreateString(NULL, "string_total", 12);
+ } else {
+ new_key = RedisModule_CreateStringPrintf(NULL, "string_changed{%s}", key_str);
+ }
+
+ RedisModule_AddPostNotificationJob(ctx, KeySpace_PostNotificationString, new_key, KeySpace_PostNotificationStringFreePD);
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_LazyExpireInsidePostNotificationJob(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char *key_str = RedisModule_StringPtrLen(key, NULL);
+
+ if (strncmp(key_str, "read_", 5) != 0) {
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleString *new_key = RedisModule_CreateString(NULL, key_str + 5, strlen(key_str) - 5);
+ RedisModule_AddPostNotificationJob(ctx, KeySpace_PostNotificationReadKey, new_key, KeySpace_PostNotificationStringFreePD);
+ return REDISMODULE_OK;
+}
+
+static int KeySpace_NestedNotification(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key){
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+
+ const char *key_str = RedisModule_StringPtrLen(key, NULL);
+
+ if (strncmp(key_str, "write_sync_", 11) != 0) {
+ return REDISMODULE_OK;
+ }
+
+ /* This test was only meant to check REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS.
+ * In general it is wrong and discouraged to perform any writes inside a notification callback. */
+ RedisModuleString *new_key = RedisModule_CreateString(NULL, key_str + 11, strlen(key_str) - 11);
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "set", "!sc", new_key, "1");
+ RedisModule_FreeCallReply(rep);
+ RedisModule_FreeString(NULL, new_key);
+ return REDISMODULE_OK;
+}
+
+static void *KeySpace_PostNotificationsAsyncSetInner(void *arg) {
+ RedisModuleBlockedClient *bc = arg;
+ RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bc);
+ RedisModule_ThreadSafeContextLock(ctx);
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "set", "!cc", "string_x", "1");
+ RedisModule_ThreadSafeContextUnlock(ctx);
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+
+ RedisModule_UnblockClient(bc, NULL);
+ RedisModule_FreeThreadSafeContext(ctx);
+ return NULL;
+}
+
+static int KeySpace_PostNotificationsAsyncSet(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1)
+ return RedisModule_WrongArity(ctx);
+
+ pthread_t tid;
+ RedisModuleBlockedClient *bc = RedisModule_BlockClient(ctx,NULL,NULL,NULL,0);
+
+ if (pthread_create(&tid,NULL,KeySpace_PostNotificationsAsyncSetInner,bc) != 0) {
+ RedisModule_AbortBlock(bc);
+ return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
+ }
+ return REDISMODULE_OK;
+}
+
+typedef struct KeySpace_EventPostNotificationCtx {
+ RedisModuleString *triggered_on;
+ RedisModuleString *new_key;
+} KeySpace_EventPostNotificationCtx;
+
+static void KeySpace_ServerEventPostNotificationFree(void *pd) {
+ KeySpace_EventPostNotificationCtx *pn_ctx = pd;
+ RedisModule_FreeString(NULL, pn_ctx->new_key);
+ RedisModule_FreeString(NULL, pn_ctx->triggered_on);
+ RedisModule_Free(pn_ctx);
+}
+
+static void KeySpace_ServerEventPostNotification(RedisModuleCtx *ctx, void *pd) {
+ KeySpace_EventPostNotificationCtx *pn_ctx = pd;
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "lpush", "!ss", pn_ctx->new_key, pn_ctx->triggered_on);
+ RedisModule_FreeCallReply(rep);
+}
+
+static void KeySpace_ServerEventCallback(RedisModuleCtx *ctx, RedisModuleEvent eid, uint64_t subevent, void *data) {
+ REDISMODULE_NOT_USED(eid);
+ if (subevent > 3) {
+ RedisModule_Log(ctx, "warning", "Got an unexpected subevent '%llu'", (unsigned long long)subevent);
+ return;
+ }
+ static const char* events[] = {
+ "before_deleted",
+ "before_expired",
+ "before_evicted",
+ "before_overwritten",
+ };
+
+ const RedisModuleString *key_name = RedisModule_GetKeyNameFromModuleKey(((RedisModuleKeyInfo*)data)->key);
+ const char *key_str = RedisModule_StringPtrLen(key_name, NULL);
+
+ for (int i = 0 ; i < 4 ; ++i) {
+ const char *event = events[i];
+ if (strncmp(key_str, event , strlen(event)) == 0) {
+ return; /* don't log any event on our tracking keys */
+ }
+ }
+
+ KeySpace_EventPostNotificationCtx *pn_ctx = RedisModule_Alloc(sizeof(*pn_ctx));
+ pn_ctx->triggered_on = RedisModule_HoldString(NULL, (RedisModuleString*)key_name);
+ pn_ctx->new_key = RedisModule_CreateString(NULL, events[subevent], strlen(events[subevent]));
+ RedisModule_AddPostNotificationJob(ctx, KeySpace_ServerEventPostNotification, pn_ctx, KeySpace_ServerEventPostNotificationFree);
+}
+
+/* This function must be present on each Redis module. It is used in order to
+ * register the commands into the Redis server. */
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"postnotifications",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ if (!(RedisModule_GetModuleOptionsAll() & REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS)) {
+ return REDISMODULE_ERR;
+ }
+
+ int with_key_events = 0;
+ if (argc >= 1) {
+ const char *arg = RedisModule_StringPtrLen(argv[0], 0);
+ if (strcmp(arg, "with_key_events") == 0) {
+ with_key_events = 1;
+ }
+ }
+
+ RedisModule_SetModuleOptions(ctx, REDISMODULE_OPTIONS_ALLOW_NESTED_KEYSPACE_NOTIFICATIONS);
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_STRING, KeySpace_NotificationString) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_STRING, KeySpace_LazyExpireInsidePostNotificationJob) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_STRING, KeySpace_NestedNotification) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_EXPIRED, KeySpace_NotificationExpired) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if(RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_EVICTED, KeySpace_NotificationEvicted) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+
+ if (with_key_events) {
+ if(RedisModule_SubscribeToServerEvent(ctx, RedisModuleEvent_Key, KeySpace_ServerEventCallback) != REDISMODULE_OK){
+ return REDISMODULE_ERR;
+ }
+ }
+
+ if (RedisModule_CreateCommand(ctx, "postnotification.async_set", KeySpace_PostNotificationsAsyncSet,
+ "write", 0, 0, 0) == REDISMODULE_ERR){
+ return REDISMODULE_ERR;
+ }
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ REDISMODULE_NOT_USED(ctx);
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/propagate.c b/tests/modules/propagate.c
new file mode 100644
index 0000000..d5132a5
--- /dev/null
+++ b/tests/modules/propagate.c
@@ -0,0 +1,403 @@
+/* This module is used to test the propagation (replication + AOF) of
+ * commands, via the RedisModule_Replicate() interface, in asynchronous
+ * contexts, such as callbacks not implementing commands, and thread safe
+ * contexts.
+ *
+ * We create a timer callback and threads using a thread safe context.
+ * Using both we try to propagate counter increments, and later we check
+ * whether the replica contains the changes as expected.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Copyright (c) 2019, Salvatore Sanfilippo <antirez at gmail dot com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "redismodule.h"
+#include <pthread.h>
+#include <errno.h>
+
+#define UNUSED(V) ((void) V)
+
+RedisModuleCtx *detached_ctx = NULL;
+
+static int KeySpace_NotificationGeneric(RedisModuleCtx *ctx, int type, const char *event, RedisModuleString *key) {
+ REDISMODULE_NOT_USED(type);
+ REDISMODULE_NOT_USED(event);
+ REDISMODULE_NOT_USED(key);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, "INCR", "c!", "notifications");
+ RedisModule_FreeCallReply(rep);
+
+ return REDISMODULE_OK;
+}
+
+/* Timer callback. */
+void timerHandler(RedisModuleCtx *ctx, void *data) {
+ REDISMODULE_NOT_USED(data);
+
+ static int times = 0;
+
+ RedisModule_Replicate(ctx,"INCR","c","timer");
+ times++;
+
+ if (times < 3)
+ RedisModule_CreateTimer(ctx,100,timerHandler,NULL);
+ else
+ times = 0;
+}
+
+int propagateTestTimerCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerHandler,NULL);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+/* Timer callback. */
+void timerNestedHandler(RedisModuleCtx *ctx, void *data) {
+ int repl = (long long)data;
+
+ /* The goal is to trigger a module command that calls RM_Replicate
+ * in order to test the MULTI/EXEC structure. */
+ RedisModule_Replicate(ctx,"INCRBY","cc","timer-nested-start","1");
+ RedisModuleCallReply *reply = RedisModule_Call(ctx,"propagate-test.nested", repl? "!" : "");
+ RedisModule_FreeCallReply(reply);
+ reply = RedisModule_Call(ctx, "INCR", repl? "c!" : "c", "timer-nested-middle");
+ RedisModule_FreeCallReply(reply);
+ RedisModule_Replicate(ctx,"INCRBY","cc","timer-nested-end","1");
+}
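+
+/* With repl != 0, the effects of this callback are expected to reach the
+ * replica wrapped in a single MULTI/EXEC block, roughly (illustrative
+ * sketch; the Tcl tests assert the exact stream):
+ *
+ *   MULTI
+ *   INCRBY timer-nested-start 1
+ *   ... effects of propagate-test.nested and of INCR timer-nested-middle ...
+ *   INCRBY timer-nested-end 1
+ *   EXEC
+ */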
+
+int propagateTestTimerNestedCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerNestedHandler,(void*)0);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+int propagateTestTimerNestedReplCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerNestedHandler,(void*)1);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+void timerHandlerMaxmemory(RedisModuleCtx *ctx, void *data) {
+ REDISMODULE_NOT_USED(data);
+
+ RedisModuleCallReply *reply = RedisModule_Call(ctx,"SETEX","ccc!","timer-maxmemory-volatile-start","100","1");
+ RedisModule_FreeCallReply(reply);
+ reply = RedisModule_Call(ctx, "CONFIG", "ccc!", "SET", "maxmemory", "1");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_Replicate(ctx, "INCR", "c", "timer-maxmemory-middle");
+
+ reply = RedisModule_Call(ctx,"SETEX","ccc!","timer-maxmemory-volatile-end","100","1");
+ RedisModule_FreeCallReply(reply);
+}
+
+int propagateTestTimerMaxmemoryCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerHandlerMaxmemory,(void*)1);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+void timerHandlerEval(RedisModuleCtx *ctx, void *data) {
+ REDISMODULE_NOT_USED(data);
+
+ RedisModuleCallReply *reply = RedisModule_Call(ctx,"INCRBY","cc!","timer-eval-start","1");
+ RedisModule_FreeCallReply(reply);
+ reply = RedisModule_Call(ctx, "EVAL", "cccc!", "redis.call('set',KEYS[1],ARGV[1])", "1", "foo", "bar");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_Replicate(ctx, "INCR", "c", "timer-eval-middle");
+
+ reply = RedisModule_Call(ctx,"INCRBY","cc!","timer-eval-end","1");
+ RedisModule_FreeCallReply(reply);
+}
+
+int propagateTestTimerEvalCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleTimerID timer_id =
+ RedisModule_CreateTimer(ctx,100,timerHandlerEval,(void*)1);
+ REDISMODULE_NOT_USED(timer_id);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+/* The thread entry point. */
+void *threadMain(void *arg) {
+ REDISMODULE_NOT_USED(arg);
+ RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(NULL);
+ RedisModule_SelectDb(ctx,9); /* Tests are run in database number 9. */
+ for (int i = 0; i < 3; i++) {
+ RedisModule_ThreadSafeContextLock(ctx);
+ RedisModule_Replicate(ctx,"INCR","c","a-from-thread");
+ RedisModuleCallReply *reply = RedisModule_Call(ctx,"INCR","c!","thread-call");
+ RedisModule_FreeCallReply(reply);
+ RedisModule_Replicate(ctx,"INCR","c","b-from-thread");
+ RedisModule_ThreadSafeContextUnlock(ctx);
+ }
+ RedisModule_FreeThreadSafeContext(ctx);
+ return NULL;
+}
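+
+/* Each lock/unlock cycle above is expected to propagate as one transaction,
+ * roughly (illustrative sketch; the generic keyspace-notification handler
+ * above also contributes an INCR of the 'notifications' key):
+ *
+ *   MULTI
+ *   INCR a-from-thread
+ *   INCR thread-call
+ *   INCR b-from-thread
+ *   EXEC
+ */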
+
+int propagateTestThreadCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ pthread_t tid;
+ if (pthread_create(&tid,NULL,threadMain,NULL) != 0)
+ return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
+ REDISMODULE_NOT_USED(tid);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+/* The thread entry point. */
+void *threadDetachedMain(void *arg) {
+ REDISMODULE_NOT_USED(arg);
+ RedisModule_SelectDb(detached_ctx,9); /* Tests are run in database number 9. */
+
+ RedisModule_ThreadSafeContextLock(detached_ctx);
+ RedisModule_Replicate(detached_ctx,"INCR","c","thread-detached-before");
+ RedisModuleCallReply *reply = RedisModule_Call(detached_ctx,"INCR","c!","thread-detached-1");
+ RedisModule_FreeCallReply(reply);
+ reply = RedisModule_Call(detached_ctx,"INCR","c!","thread-detached-2");
+ RedisModule_FreeCallReply(reply);
+ RedisModule_Replicate(detached_ctx,"INCR","c","thread-detached-after");
+ RedisModule_ThreadSafeContextUnlock(detached_ctx);
+
+ return NULL;
+}
+
+int propagateTestDetachedThreadCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ pthread_t tid;
+ if (pthread_create(&tid,NULL,threadDetachedMain,NULL) != 0)
+ return RedisModule_ReplyWithError(ctx,"-ERR Can't start thread");
+ REDISMODULE_NOT_USED(tid);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+int propagateTestSimpleCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ /* Replicate two commands to test MULTI/EXEC wrapping. */
+ RedisModule_Replicate(ctx,"INCR","c","counter-1");
+ RedisModule_Replicate(ctx,"INCR","c","counter-2");
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
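+
+/* On the replica, the two replicated INCRs above are expected to arrive
+ * wrapped in a transaction (illustrative):
+ *
+ *   MULTI
+ *   INCR counter-1
+ *   INCR counter-2
+ *   EXEC
+ */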
+
+int propagateTestMixedCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleCallReply *reply;
+
+ /* This test mixes multiple propagation systems. */
+ reply = RedisModule_Call(ctx, "INCR", "c!", "using-call");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_Replicate(ctx,"INCR","c","counter-1");
+ RedisModule_Replicate(ctx,"INCR","c","counter-2");
+
+ reply = RedisModule_Call(ctx, "INCR", "c!", "after-call");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+int propagateTestNestedCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleCallReply *reply;
+
+ /* This test mixes multiple propagation systems. */
+ reply = RedisModule_Call(ctx, "INCR", "c!", "using-call");
+ RedisModule_FreeCallReply(reply);
+
+ reply = RedisModule_Call(ctx,"propagate-test.simple", "!");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_Replicate(ctx,"INCR","c","counter-3");
+ RedisModule_Replicate(ctx,"INCR","c","counter-4");
+
+ reply = RedisModule_Call(ctx, "INCR", "c!", "after-call");
+ RedisModule_FreeCallReply(reply);
+
+ reply = RedisModule_Call(ctx, "INCR", "c!", "before-call-2");
+ RedisModule_FreeCallReply(reply);
+
+ reply = RedisModule_Call(ctx, "keyspace.incr_case1", "c!", "asdf"); /* Propagates INCR */
+ RedisModule_FreeCallReply(reply);
+
+ reply = RedisModule_Call(ctx, "keyspace.del_key_copy", "c!", "asdf"); /* Propagates DEL */
+ RedisModule_FreeCallReply(reply);
+
+ reply = RedisModule_Call(ctx, "INCR", "c!", "after-call-2");
+ RedisModule_FreeCallReply(reply);
+
+ RedisModule_ReplyWithSimpleString(ctx,"OK");
+ return REDISMODULE_OK;
+}
+
+int propagateTestIncr(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argc);
+ RedisModuleCallReply *reply;
+
+ /* This test propagates the module command, not the INCR it executes. */
+ reply = RedisModule_Call(ctx, "INCR", "s", argv[1]);
+ RedisModule_ReplyWithCallReply(ctx,reply);
+ RedisModule_FreeCallReply(reply);
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"propagate-test",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+ detached_ctx = RedisModule_GetDetachedThreadSafeContext(ctx);
+
+ if (RedisModule_SubscribeToKeyspaceEvents(ctx, REDISMODULE_NOTIFY_ALL, KeySpace_NotificationGeneric) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer",
+ propagateTestTimerCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer-nested",
+ propagateTestTimerNestedCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer-nested-repl",
+ propagateTestTimerNestedReplCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer-maxmemory",
+ propagateTestTimerMaxmemoryCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.timer-eval",
+ propagateTestTimerEvalCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.thread",
+ propagateTestThreadCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.detached-thread",
+ propagateTestDetachedThreadCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.simple",
+ propagateTestSimpleCommand,
+ "",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.mixed",
+ propagateTestMixedCommand,
+ "write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.nested",
+ propagateTestNestedCommand,
+ "write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"propagate-test.incr",
+ propagateTestIncr,
+ "write",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ UNUSED(ctx);
+
+ if (detached_ctx)
+ RedisModule_FreeThreadSafeContext(detached_ctx);
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/publish.c b/tests/modules/publish.c
new file mode 100644
index 0000000..ff276d8
--- /dev/null
+++ b/tests/modules/publish.c
@@ -0,0 +1,57 @@
+#include "redismodule.h"
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+
+#define UNUSED(V) ((void) V)
+
+int cmd_publish_classic_multi(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc < 3)
+ return RedisModule_WrongArity(ctx);
+ RedisModule_ReplyWithArray(ctx, argc-2);
+ for (int i = 2; i < argc; i++) {
+ int receivers = RedisModule_PublishMessage(ctx, argv[1], argv[i]);
+ RedisModule_ReplyWithLongLong(ctx, receivers);
+ }
+ return REDISMODULE_OK;
+}
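+
+/* Hypothetical usage, assuming a single subscriber on "chan" (illustrative):
+ *
+ *   publish.classic_multi chan m1 m2   -> 1) (integer) 1
+ *                                         2) (integer) 1
+ */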
+
+int cmd_publish_classic(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 3)
+ return RedisModule_WrongArity(ctx);
+
+ int receivers = RedisModule_PublishMessage(ctx, argv[1], argv[2]);
+ RedisModule_ReplyWithLongLong(ctx, receivers);
+ return REDISMODULE_OK;
+}
+
+int cmd_publish_shard(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 3)
+ return RedisModule_WrongArity(ctx);
+
+ int receivers = RedisModule_PublishMessageShard(ctx, argv[1], argv[2]);
+ RedisModule_ReplyWithLongLong(ctx, receivers);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ if (RedisModule_Init(ctx,"publish",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"publish.classic",cmd_publish_classic,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"publish.classic_multi",cmd_publish_classic_multi,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"publish.shard",cmd_publish_shard,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/rdbloadsave.c b/tests/modules/rdbloadsave.c
new file mode 100644
index 0000000..687269a
--- /dev/null
+++ b/tests/modules/rdbloadsave.c
@@ -0,0 +1,162 @@
+#include "redismodule.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <memory.h>
+#include <errno.h>
+
+/* Sanity tests to verify inputs and return values. */
+int sanity(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ RedisModuleRdbStream *s = RedisModule_RdbStreamCreateFromFile("dbnew.rdb");
+
+ /* NULL stream should fail. */
+ if (RedisModule_RdbLoad(ctx, NULL, 0) == REDISMODULE_OK || errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ goto out;
+ }
+
+ /* Invalid flags should fail. */
+ if (RedisModule_RdbLoad(ctx, s, 188) == REDISMODULE_OK || errno != EINVAL) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ goto out;
+ }
+
+ /* Missing file should fail. */
+ if (RedisModule_RdbLoad(ctx, s, 0) == REDISMODULE_OK || errno != ENOENT) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ goto out;
+ }
+
+ /* Save RDB file. */
+ if (RedisModule_RdbSave(ctx, s, 0) != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ goto out;
+ }
+
+ /* Load the saved RDB file. */
+ if (RedisModule_RdbLoad(ctx, s, 0) != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ goto out;
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+ out:
+ RedisModule_RdbStreamFree(s);
+ return REDISMODULE_OK;
+}
+
+int cmd_rdbsave(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ size_t len;
+ const char *filename = RedisModule_StringPtrLen(argv[1], &len);
+
+ char tmp[len + 1];
+ memcpy(tmp, filename, len);
+ tmp[len] = '\0';
+
+ RedisModuleRdbStream *stream = RedisModule_RdbStreamCreateFromFile(tmp);
+
+ if (RedisModule_RdbSave(ctx, stream, 0) != REDISMODULE_OK || errno != 0) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ goto out;
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+out:
+ RedisModule_RdbStreamFree(stream);
+ return REDISMODULE_OK;
+}
+
+/* Fork before calling RM_RdbSave(). */
+int cmd_rdbsave_fork(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ size_t len;
+ const char *filename = RedisModule_StringPtrLen(argv[1], &len);
+
+ char tmp[len + 1];
+ memcpy(tmp, filename, len);
+ tmp[len] = '\0';
+
+ int fork_child_pid = RedisModule_Fork(NULL, NULL);
+ if (fork_child_pid < 0) {
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ return REDISMODULE_OK;
+ } else if (fork_child_pid > 0) {
+ /* parent */
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleRdbStream *stream = RedisModule_RdbStreamCreateFromFile(tmp);
+
+ int ret = 0;
+ if (RedisModule_RdbSave(ctx, stream, 0) != REDISMODULE_OK) {
+ ret = errno;
+ }
+ RedisModule_RdbStreamFree(stream);
+
+ RedisModule_ExitFromChild(ret);
+ return REDISMODULE_OK;
+}
+
+int cmd_rdbload(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ size_t len;
+ const char *filename = RedisModule_StringPtrLen(argv[1], &len);
+
+ char tmp[len + 1];
+ memcpy(tmp, filename, len);
+ tmp[len] = '\0';
+
+ RedisModuleRdbStream *stream = RedisModule_RdbStreamCreateFromFile(tmp);
+
+ if (RedisModule_RdbLoad(ctx, stream, 0) != REDISMODULE_OK || errno != 0) {
+ RedisModule_RdbStreamFree(stream);
+ RedisModule_ReplyWithError(ctx, strerror(errno));
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_RdbStreamFree(stream);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
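+
+/* A hypothetical round trip using the commands above (the file name is
+ * arbitrary; illustrative only):
+ *
+ *   test.rdbsave backup.rdb   -> OK
+ *   FLUSHALL
+ *   test.rdbload backup.rdb   -> OK
+ */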
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "rdbloadsave", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "test.sanity", sanity, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "test.rdbsave", cmd_rdbsave, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "test.rdbsave_fork", cmd_rdbsave_fork, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "test.rdbload", cmd_rdbload, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/reply.c b/tests/modules/reply.c
new file mode 100644
index 0000000..c5baa66
--- /dev/null
+++ b/tests/modules/reply.c
@@ -0,0 +1,214 @@
+/*
+ * A module that tests the RM_ReplyWith family of commands
+ */
+
+#include "redismodule.h"
+#include <math.h>
+
+int rw_string(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ return RedisModule_ReplyWithString(ctx, argv[1]);
+}
+
+int rw_cstring(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1) return RedisModule_WrongArity(ctx);
+
+ return RedisModule_ReplyWithSimpleString(ctx, "A simple string");
+}
+
+int rw_int(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ long long integer;
+ if (RedisModule_StringToLongLong(argv[1], &integer) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as an integer");
+
+ return RedisModule_ReplyWithLongLong(ctx, integer);
+}
+
+/* When called with no arguments it replies with NaN; when one argument is
+ * given, it is returned as a double; when two arguments are given, it
+ * returns a/b. */
+int rw_double(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc==1)
+ return RedisModule_ReplyWithDouble(ctx, NAN);
+
+ if (argc != 2 && argc != 3) return RedisModule_WrongArity(ctx);
+
+ double dbl, dbl2;
+ if (RedisModule_StringToDouble(argv[1], &dbl) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as a double");
+ if (argc == 3) {
+ if (RedisModule_StringToDouble(argv[2], &dbl2) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as a double");
+ dbl /= dbl2;
+ }
+
+ return RedisModule_ReplyWithDouble(ctx, dbl);
+}
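+
+/* Illustrative RESP3 replies:
+ *
+ *   rw.double       -> (double) nan
+ *   rw.double 3     -> (double) 3
+ *   rw.double 3 2   -> (double) 1.5
+ */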
+
+int rw_longdouble(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ long double longdbl;
+ if (RedisModule_StringToLongDouble(argv[1], &longdbl) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as a long double");
+
+ return RedisModule_ReplyWithLongDouble(ctx, longdbl);
+}
+
+int rw_bignumber(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ size_t bignum_len;
+ const char *bignum_str = RedisModule_StringPtrLen(argv[1], &bignum_len);
+
+ return RedisModule_ReplyWithBigNumber(ctx, bignum_str, bignum_len);
+}
+
+int rw_array(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ long long integer;
+ if (RedisModule_StringToLongLong(argv[1], &integer) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as an integer");
+
+ RedisModule_ReplyWithArray(ctx, integer);
+ for (int i = 0; i < integer; ++i) {
+ RedisModule_ReplyWithLongLong(ctx, i);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int rw_map(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ long long integer;
+ if (RedisModule_StringToLongLong(argv[1], &integer) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as an integer");
+
+ RedisModule_ReplyWithMap(ctx, integer);
+ for (int i = 0; i < integer; ++i) {
+ RedisModule_ReplyWithLongLong(ctx, i);
+ RedisModule_ReplyWithDouble(ctx, i * 1.5);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int rw_set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ long long integer;
+ if (RedisModule_StringToLongLong(argv[1], &integer) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as an integer");
+
+ RedisModule_ReplyWithSet(ctx, integer);
+ for (int i = 0; i < integer; ++i) {
+ RedisModule_ReplyWithLongLong(ctx, i);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int rw_attribute(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ long long integer;
+ if (RedisModule_StringToLongLong(argv[1], &integer) != REDISMODULE_OK)
+ return RedisModule_ReplyWithError(ctx, "Arg cannot be parsed as an integer");
+
+ if (RedisModule_ReplyWithAttribute(ctx, integer) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "Attributes aren't supported by RESP 2");
+ }
+
+ for (int i = 0; i < integer; ++i) {
+ RedisModule_ReplyWithLongLong(ctx, i);
+ RedisModule_ReplyWithDouble(ctx, i * 1.5);
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int rw_bool(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1) return RedisModule_WrongArity(ctx);
+
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithBool(ctx, 0);
+ return RedisModule_ReplyWithBool(ctx, 1);
+}
+
+int rw_null(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1) return RedisModule_WrongArity(ctx);
+
+ return RedisModule_ReplyWithNull(ctx);
+}
+
+int rw_error(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1) return RedisModule_WrongArity(ctx);
+
+ return RedisModule_ReplyWithError(ctx, "An error");
+}
+
+int rw_error_format(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+
+ return RedisModule_ReplyWithErrorFormat(ctx,
+ RedisModule_StringPtrLen(argv[1], NULL),
+ RedisModule_StringPtrLen(argv[2], NULL));
+}
+
+int rw_verbatim(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+
+ size_t verbatim_len;
+ const char *verbatim_str = RedisModule_StringPtrLen(argv[1], &verbatim_len);
+
+ return RedisModule_ReplyWithVerbatimString(ctx, verbatim_str, verbatim_len);
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "replywith", 1, REDISMODULE_APIVER_1) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"rw.string",rw_string,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.cstring",rw_cstring,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.bignumber",rw_bignumber,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.int",rw_int,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.double",rw_double,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.longdouble",rw_longdouble,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.array",rw_array,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.map",rw_map,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.attribute",rw_attribute,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.set",rw_set,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.bool",rw_bool,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.null",rw_null,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.error",rw_error,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.error_format",rw_error_format,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"rw.verbatim",rw_verbatim,"",0,0,0) != REDISMODULE_OK)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/scan.c b/tests/modules/scan.c
new file mode 100644
index 0000000..1723d30
--- /dev/null
+++ b/tests/modules/scan.c
@@ -0,0 +1,121 @@
+#include "redismodule.h"
+
+#include <string.h>
+#include <assert.h>
+#include <unistd.h>
+
+typedef struct {
+ size_t nkeys;
+} scan_strings_pd;
+
+void scan_strings_callback(RedisModuleCtx *ctx, RedisModuleString* keyname, RedisModuleKey* key, void *privdata) {
+ scan_strings_pd* pd = privdata;
+ int was_opened = 0;
+ if (!key) {
+ key = RedisModule_OpenKey(ctx, keyname, REDISMODULE_READ);
+ was_opened = 1;
+ }
+
+ if (RedisModule_KeyType(key) == REDISMODULE_KEYTYPE_STRING) {
+ size_t len;
+ char * data = RedisModule_StringDMA(key, &len, REDISMODULE_READ);
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithString(ctx, keyname);
+ RedisModule_ReplyWithStringBuffer(ctx, data, len);
+ pd->nkeys++;
+ }
+ if (was_opened)
+ RedisModule_CloseKey(key);
+}
+
+int scan_strings(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ scan_strings_pd pd = {
+ .nkeys = 0,
+ };
+
+ RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_LEN);
+
+ RedisModuleScanCursor* cursor = RedisModule_ScanCursorCreate();
+ while(RedisModule_Scan(ctx, cursor, scan_strings_callback, &pd));
+ RedisModule_ScanCursorDestroy(cursor);
+
+ RedisModule_ReplySetArrayLength(ctx, pd.nkeys);
+ return REDISMODULE_OK;
+}
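+
+/* Hypothetical example: with s1="a" (a string) and h1 (a hash) in the
+ * keyspace, scan.scan_strings is expected to reply with the string keys
+ * only (illustrative):
+ *
+ *   1) 1) "s1"
+ *      2) "a"
+ */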
+
+typedef struct {
+ RedisModuleCtx *ctx;
+ size_t nreplies;
+} scan_key_pd;
+
+void scan_key_callback(RedisModuleKey *key, RedisModuleString* field, RedisModuleString* value, void *privdata) {
+ REDISMODULE_NOT_USED(key);
+ scan_key_pd* pd = privdata;
+ RedisModule_ReplyWithArray(pd->ctx, 2);
+ size_t fieldCStrLen;
+
+ // The implementation of RedisModuleString is an robj with many possible encodings.
+ // We want to make sure the robj passed to this callback is string encoded,
+ // which is why we use RedisModule_StringPtrLen and
+ // RedisModule_ReplyWithStringBuffer instead of using
+ // RedisModule_ReplyWithString directly.
+ const char* fieldCStr = RedisModule_StringPtrLen(field, &fieldCStrLen);
+ RedisModule_ReplyWithStringBuffer(pd->ctx, fieldCStr, fieldCStrLen);
+ if(value){
+ size_t valueCStrLen;
+ const char* valueCStr = RedisModule_StringPtrLen(value, &valueCStrLen);
+ RedisModule_ReplyWithStringBuffer(pd->ctx, valueCStr, valueCStrLen);
+ } else {
+ RedisModule_ReplyWithNull(pd->ctx);
+ }
+
+ pd->nreplies++;
+}
+
+int scan_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ scan_key_pd pd = {
+ .ctx = ctx,
+ .nreplies = 0,
+ };
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ if (!key) {
+ RedisModule_ReplyWithError(ctx, "not found");
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_LEN);
+
+ RedisModuleScanCursor* cursor = RedisModule_ScanCursorCreate();
+ while(RedisModule_ScanKey(key, cursor, scan_key_callback, &pd));
+ RedisModule_ScanCursorDestroy(cursor);
+
+ RedisModule_ReplySetArrayLength(ctx, pd.nreplies);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "scan", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "scan.scan_strings", scan_strings, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "scan.scan_key", scan_key, "", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
+
+
diff --git a/tests/modules/stream.c b/tests/modules/stream.c
new file mode 100644
index 0000000..65762a3
--- /dev/null
+++ b/tests/modules/stream.c
@@ -0,0 +1,258 @@
+#include "redismodule.h"
+
+#include <string.h>
+#include <strings.h>
+#include <assert.h>
+#include <unistd.h>
+#include <errno.h>
+
+/* Command which adds a stream entry with automatic ID, like XADD *.
+ *
+ * Syntax: STREAM.ADD key field1 value1 [ field2 value2 ... ]
+ *
+ * The response is the ID of the added stream entry or an error message.
+ */
+int stream_add(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 2 || argc % 2 != 0) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ RedisModuleStreamID id;
+ if (RedisModule_StreamAdd(key, REDISMODULE_STREAM_ADD_AUTOID, &id,
+ &argv[2], (argc-2)/2) == REDISMODULE_OK) {
+ RedisModuleString *id_str = RedisModule_CreateStringFromStreamID(ctx, &id);
+ RedisModule_ReplyWithString(ctx, id_str);
+ RedisModule_FreeString(ctx, id_str);
+ } else {
+ RedisModule_ReplyWithError(ctx, "ERR StreamAdd failed");
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* Command which adds a stream entry N times.
+ *
+ * Syntax: STREAM.ADD key N field1 value1 [ field2 value2 ... ]
+ *
+ * Returns the number of successfully added entries.
+ */
+int stream_addn(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 3 || argc % 2 == 0) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long n, i;
+ if (RedisModule_StringToLongLong(argv[2], &n) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx, "N must be a number");
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ for (i = 0; i < n; i++) {
+ if (RedisModule_StreamAdd(key, REDISMODULE_STREAM_ADD_AUTOID, NULL,
+ &argv[3], (argc-3)/2) == REDISMODULE_ERR)
+ break;
+ }
+ RedisModule_ReplyWithLongLong(ctx, i);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
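+
+/* Hypothetical usage (illustrative): STREAM.ADDN mystream 3 field value
+ * adds three entries {field: value} with automatic IDs and replies
+ * (integer) 3. */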
+
+/* STREAM.DELETE key stream-id */
+int stream_delete(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ RedisModuleStreamID id;
+ if (RedisModule_StringToStreamID(argv[2], &id) != REDISMODULE_OK) {
+ return RedisModule_ReplyWithError(ctx, "Invalid stream ID");
+ }
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ if (RedisModule_StreamDelete(key, &id) == REDISMODULE_OK) {
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ } else {
+ RedisModule_ReplyWithError(ctx, "ERR StreamDelete failed");
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/* STREAM.RANGE key start-id end-id
+ *
+ * Returns an array of stream items. Each item is an array of the form
+ * [stream-id, [field1, value1, field2, value2, ...]].
+ *
+ * A funny side-effect used for testing RM_StreamIteratorDelete() is that if any
+ * entry has a field named "selfdestruct", the stream entry is deleted. It is
+ * however included in the results of this command.
+ */
+int stream_range(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleStreamID startid, endid;
+ if (RedisModule_StringToStreamID(argv[2], &startid) != REDISMODULE_OK ||
+ RedisModule_StringToStreamID(argv[3], &endid) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "Invalid stream ID");
+ return REDISMODULE_OK;
+ }
+
+ /* If startid > endid, we swap and set the reverse flag. */
+ int flags = 0;
+ if (startid.ms > endid.ms ||
+ (startid.ms == endid.ms && startid.seq > endid.seq)) {
+ RedisModuleStreamID tmp = startid;
+ startid = endid;
+ endid = tmp;
+ flags |= REDISMODULE_STREAM_ITERATOR_REVERSE;
+ }
+
+ /* Open key and start iterator. */
+ int openflags = REDISMODULE_READ | REDISMODULE_WRITE;
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], openflags);
+ if (RedisModule_StreamIteratorStart(key, flags,
+ &startid, &endid) != REDISMODULE_OK) {
+ /* Key is not a stream, etc. */
+ RedisModule_ReplyWithError(ctx, "ERR StreamIteratorStart failed");
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+ }
+
+ /* Check error handling: Delete current entry when no current entry. */
+ assert(RedisModule_StreamIteratorDelete(key) ==
+ REDISMODULE_ERR);
+ assert(errno == ENOENT);
+
+ /* Check error handling: Fetch fields when no current entry. */
+ assert(RedisModule_StreamIteratorNextField(key, NULL, NULL) ==
+ REDISMODULE_ERR);
+ assert(errno == ENOENT);
+
+ /* Return array. */
+ RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_LEN);
+ RedisModule_AutoMemory(ctx);
+ RedisModuleStreamID id;
+ long numfields;
+ long len = 0;
+ while (RedisModule_StreamIteratorNextID(key, &id,
+ &numfields) == REDISMODULE_OK) {
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModuleString *id_str = RedisModule_CreateStringFromStreamID(ctx, &id);
+ RedisModule_ReplyWithString(ctx, id_str);
+ RedisModule_ReplyWithArray(ctx, numfields * 2);
+ int delete = 0;
+ RedisModuleString *field, *value;
+ for (long i = 0; i < numfields; i++) {
+ assert(RedisModule_StreamIteratorNextField(key, &field, &value) ==
+ REDISMODULE_OK);
+ RedisModule_ReplyWithString(ctx, field);
+ RedisModule_ReplyWithString(ctx, value);
+ /* check if this is a "selfdestruct" field */
+ size_t field_len;
+ const char *field_str = RedisModule_StringPtrLen(field, &field_len);
+ if (!strncmp(field_str, "selfdestruct", field_len)) delete = 1;
+ }
+ if (delete) {
+ assert(RedisModule_StreamIteratorDelete(key) == REDISMODULE_OK);
+ }
+ /* check error handling: no more fields to fetch */
+ assert(RedisModule_StreamIteratorNextField(key, &field, &value) ==
+ REDISMODULE_ERR);
+ assert(errno == ENOENT);
+ len++;
+ }
+ RedisModule_ReplySetArrayLength(ctx, len);
+ RedisModule_StreamIteratorStop(key);
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+/*
+ * STREAM.TRIM key (MAXLEN (=|~) length | MINID (=|~) id)
+ */
+int stream_trim(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 5) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ /* Parse args */
+ int trim_by_id = 0; /* 0 = maxlen, 1 = minid */
+ long long maxlen;
+ RedisModuleStreamID minid;
+ size_t arg_len;
+ const char *arg = RedisModule_StringPtrLen(argv[2], &arg_len);
+ if (!strcasecmp(arg, "minid")) {
+ trim_by_id = 1;
+ if (RedisModule_StringToStreamID(argv[4], &minid) != REDISMODULE_OK) {
+ RedisModule_ReplyWithError(ctx, "ERR Invalid stream ID");
+ return REDISMODULE_OK;
+ }
+ } else if (!strcasecmp(arg, "maxlen")) {
+ if (RedisModule_StringToLongLong(argv[4], &maxlen) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx, "ERR Maxlen must be a number");
+ return REDISMODULE_OK;
+ }
+ } else {
+ RedisModule_ReplyWithError(ctx, "ERR Invalid arguments");
+ return REDISMODULE_OK;
+ }
+
+ /* Approx or exact */
+ int flags;
+ arg = RedisModule_StringPtrLen(argv[3], &arg_len);
+ if (arg_len == 1 && arg[0] == '~') {
+ flags = REDISMODULE_STREAM_TRIM_APPROX;
+ } else if (arg_len == 1 && arg[0] == '=') {
+ flags = 0;
+ } else {
+ RedisModule_ReplyWithError(ctx, "ERR Invalid approx-or-exact mark");
+ return REDISMODULE_OK;
+ }
+
+ /* Trim */
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ long long trimmed;
+ if (trim_by_id) {
+ trimmed = RedisModule_StreamTrimByID(key, flags, &minid);
+ } else {
+ trimmed = RedisModule_StreamTrimByLength(key, flags, maxlen);
+ }
+
+ /* Return result */
+ if (trimmed < 0) {
+ RedisModule_ReplyWithError(ctx, "ERR Trimming failed");
+ } else {
+ RedisModule_ReplyWithLongLong(ctx, trimmed);
+ }
+ RedisModule_CloseKey(key);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "stream", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "stream.add", stream_add, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "stream.addn", stream_addn, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "stream.delete", stream_delete, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "stream.range", stream_range, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx, "stream.trim", stream_trim, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/subcommands.c b/tests/modules/subcommands.c
new file mode 100644
index 0000000..1b2bc51
--- /dev/null
+++ b/tests/modules/subcommands.c
@@ -0,0 +1,112 @@
+#include "redismodule.h"
+
+#define UNUSED(V) ((void) V)
+
+int cmd_set(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int cmd_get(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+
+ if (argc > 4) /* For testing */
+ return RedisModule_WrongArity(ctx);
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+ return REDISMODULE_OK;
+}
+
+int cmd_get_fullname(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ UNUSED(argv);
+ UNUSED(argc);
+
+ const char *command_name = RedisModule_GetCurrentCommandName(ctx);
+ RedisModule_ReplyWithSimpleString(ctx, command_name);
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx, "subcommands", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ /* Module command names cannot contain special characters. */
+ RedisModule_Assert(RedisModule_CreateCommand(ctx,"subcommands.char\r",NULL,"",0,0,0) == REDISMODULE_ERR);
+ RedisModule_Assert(RedisModule_CreateCommand(ctx,"subcommands.char\n",NULL,"",0,0,0) == REDISMODULE_ERR);
+ RedisModule_Assert(RedisModule_CreateCommand(ctx,"subcommands.char ",NULL,"",0,0,0) == REDISMODULE_ERR);
+
+ if (RedisModule_CreateCommand(ctx,"subcommands.bitarray",NULL,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ RedisModuleCommand *parent = RedisModule_GetCommand(ctx,"subcommands.bitarray");
+
+ if (RedisModule_CreateSubcommand(parent,"set",cmd_set,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ /* Module subcommand names cannot contain special characters. */
+ RedisModule_Assert(RedisModule_CreateSubcommand(parent,"char|",cmd_set,"",0,0,0) == REDISMODULE_ERR);
+ RedisModule_Assert(RedisModule_CreateSubcommand(parent,"char@",cmd_set,"",0,0,0) == REDISMODULE_ERR);
+ RedisModule_Assert(RedisModule_CreateSubcommand(parent,"char=",cmd_set,"",0,0,0) == REDISMODULE_ERR);
+
+ RedisModuleCommand *subcmd = RedisModule_GetCommand(ctx,"subcommands.bitarray|set");
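+    /* Key spec for 'set': argv[1] is the only key (begin-search at index 1,
+     * range last-key 0, step 1), accessed read-write for update. */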
+ RedisModuleCommandInfo cmd_set_info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(subcmd, &cmd_set_info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateSubcommand(parent,"get",cmd_get,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ subcmd = RedisModule_GetCommand(ctx,"subcommands.bitarray|get");
+ RedisModuleCommandInfo cmd_get_info = {
+ .version = REDISMODULE_COMMAND_INFO_VERSION,
+ .key_specs = (RedisModuleCommandKeySpec[]){
+ {
+ .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS,
+ .begin_search_type = REDISMODULE_KSPEC_BS_INDEX,
+ .bs.index.pos = 1,
+ .find_keys_type = REDISMODULE_KSPEC_FK_RANGE,
+ .fk.range = {0,1,0}
+ },
+ {0}
+ }
+ };
+ if (RedisModule_SetCommandInfo(subcmd, &cmd_get_info) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ /* Get the name of the command currently running. */
+ if (RedisModule_CreateCommand(ctx,"subcommands.parent_get_fullname",cmd_get_fullname,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ /* Get the name of the subcommand currently running. */
+ if (RedisModule_CreateCommand(ctx,"subcommands.sub",NULL,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModuleCommand *fullname_parent = RedisModule_GetCommand(ctx,"subcommands.sub");
+ if (RedisModule_CreateSubcommand(fullname_parent,"get_fullname",cmd_get_fullname,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
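+    /* Expected behavior so far (hypothetical redis-cli session):
+     *   SUBCOMMANDS.BITARRAY SET foo  -> +OK
+     *   SUBCOMMANDS.SUB GET_FULLNAME  -> "subcommands.sub|get_fullname" */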
+ /* Sanity */
+
+ /* Trying to create the same subcommand fails */
+ RedisModule_Assert(RedisModule_CreateSubcommand(parent,"get",NULL,"",0,0,0) == REDISMODULE_ERR);
+
+ /* Trying to create a sub-subcommand fails */
+ RedisModule_Assert(RedisModule_CreateSubcommand(subcmd,"get",NULL,"",0,0,0) == REDISMODULE_ERR);
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/test_lazyfree.c b/tests/modules/test_lazyfree.c
new file mode 100644
index 0000000..7ba213f
--- /dev/null
+++ b/tests/modules/test_lazyfree.c
@@ -0,0 +1,196 @@
+/* This module implements a linked list data type, used to test lazy freeing
+   of module values; it is a simplified version of 'hellotype.c'.
+ */
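+/* Commands registered by this module (see RedisModule_OnLoad below):
+ *   LAZYFREELINK.INSERT key value
+ *   LAZYFREELINK.LEN key
+ */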
+#include "redismodule.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdint.h>
+
+static RedisModuleType *LazyFreeLinkType;
+
+struct LazyFreeLinkNode {
+ int64_t value;
+ struct LazyFreeLinkNode *next;
+};
+
+struct LazyFreeLinkObject {
+ struct LazyFreeLinkNode *head;
+ size_t len; /* Number of elements added. */
+};
+
+struct LazyFreeLinkObject *createLazyFreeLinkObject(void) {
+ struct LazyFreeLinkObject *o;
+ o = RedisModule_Alloc(sizeof(*o));
+ o->head = NULL;
+ o->len = 0;
+ return o;
+}
+
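+/* Insert a value, keeping the list sorted in ascending order. */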
+void LazyFreeLinkInsert(struct LazyFreeLinkObject *o, int64_t ele) {
+ struct LazyFreeLinkNode *next = o->head, *newnode, *prev = NULL;
+
+ while(next && next->value < ele) {
+ prev = next;
+ next = next->next;
+ }
+ newnode = RedisModule_Alloc(sizeof(*newnode));
+ newnode->value = ele;
+ newnode->next = next;
+ if (prev) {
+ prev->next = newnode;
+ } else {
+ o->head = newnode;
+ }
+ o->len++;
+}
+
+void LazyFreeLinkReleaseObject(struct LazyFreeLinkObject *o) {
+ struct LazyFreeLinkNode *cur, *next;
+ cur = o->head;
+ while(cur) {
+ next = cur->next;
+ RedisModule_Free(cur);
+ cur = next;
+ }
+ RedisModule_Free(o);
+}
+
+/* LAZYFREELINK.INSERT key value */
+int LazyFreeLinkInsert_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx); /* Use automatic memory management. */
+
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ RedisModuleKey *key = RedisModule_OpenKey(ctx,argv[1],
+ REDISMODULE_READ|REDISMODULE_WRITE);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY &&
+ RedisModule_ModuleTypeGetType(key) != LazyFreeLinkType)
+ {
+ return RedisModule_ReplyWithError(ctx,REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ long long value;
+ if ((RedisModule_StringToLongLong(argv[2],&value) != REDISMODULE_OK)) {
+ return RedisModule_ReplyWithError(ctx,"ERR invalid value: must be a signed 64 bit integer");
+ }
+
+ struct LazyFreeLinkObject *hto;
+ if (type == REDISMODULE_KEYTYPE_EMPTY) {
+ hto = createLazyFreeLinkObject();
+ RedisModule_ModuleTypeSetValue(key,LazyFreeLinkType,hto);
+ } else {
+ hto = RedisModule_ModuleTypeGetValue(key);
+ }
+
+ LazyFreeLinkInsert(hto,value);
+ RedisModule_SignalKeyAsReady(ctx,argv[1]);
+
+ RedisModule_ReplyWithLongLong(ctx,hto->len);
+ RedisModule_ReplicateVerbatim(ctx);
+ return REDISMODULE_OK;
+}
+
+/* LAZYFREELINK.LEN key */
+int LazyFreeLinkLen_RedisCommand(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ RedisModule_AutoMemory(ctx); /* Use automatic memory management. */
+
+ if (argc != 2) return RedisModule_WrongArity(ctx);
+ RedisModuleKey *key = RedisModule_OpenKey(ctx,argv[1],
+ REDISMODULE_READ);
+ int type = RedisModule_KeyType(key);
+ if (type != REDISMODULE_KEYTYPE_EMPTY &&
+ RedisModule_ModuleTypeGetType(key) != LazyFreeLinkType)
+ {
+ return RedisModule_ReplyWithError(ctx,REDISMODULE_ERRORMSG_WRONGTYPE);
+ }
+
+ struct LazyFreeLinkObject *hto = RedisModule_ModuleTypeGetValue(key);
+ RedisModule_ReplyWithLongLong(ctx,hto ? hto->len : 0);
+ return REDISMODULE_OK;
+}
+
+void *LazyFreeLinkRdbLoad(RedisModuleIO *rdb, int encver) {
+ if (encver != 0) {
+ return NULL;
+ }
+ uint64_t elements = RedisModule_LoadUnsigned(rdb);
+ struct LazyFreeLinkObject *hto = createLazyFreeLinkObject();
+ while(elements--) {
+ int64_t ele = RedisModule_LoadSigned(rdb);
+ LazyFreeLinkInsert(hto,ele);
+ }
+ return hto;
+}
+
+void LazyFreeLinkRdbSave(RedisModuleIO *rdb, void *value) {
+ struct LazyFreeLinkObject *hto = value;
+ struct LazyFreeLinkNode *node = hto->head;
+ RedisModule_SaveUnsigned(rdb,hto->len);
+ while(node) {
+ RedisModule_SaveSigned(rdb,node->value);
+ node = node->next;
+ }
+}
+
+void LazyFreeLinkAofRewrite(RedisModuleIO *aof, RedisModuleString *key, void *value) {
+ struct LazyFreeLinkObject *hto = value;
+ struct LazyFreeLinkNode *node = hto->head;
+ while(node) {
+ RedisModule_EmitAOF(aof,"LAZYFREELINK.INSERT","sl",key,node->value);
+ node = node->next;
+ }
+}
+
+void LazyFreeLinkFree(void *value) {
+ LazyFreeLinkReleaseObject(value);
+}
+
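+/* free_effort callback: returns an estimate of the work needed to free the
+ * value (here, the number of list nodes). Redis uses this estimate to decide
+ * whether to free the object lazily in a background thread. */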
+size_t LazyFreeLinkFreeEffort(RedisModuleString *key, const void *value) {
+ REDISMODULE_NOT_USED(key);
+ const struct LazyFreeLinkObject *hto = value;
+ return hto->len;
+}
+
+void LazyFreeLinkUnlink(RedisModuleString *key, const void *value) {
+ REDISMODULE_NOT_USED(key);
+ REDISMODULE_NOT_USED(value);
+    /* Here the module can learn which key and value are about to be freed. */
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"lazyfreetest",1,REDISMODULE_APIVER_1)
+ == REDISMODULE_ERR) return REDISMODULE_ERR;
+
+    /* Only load this module if the Redis core supports at least the
+     * type-method version the module was compiled against. */
+ if (RedisModule_GetTypeMethodVersion() < REDISMODULE_TYPE_METHOD_VERSION) {
+ return REDISMODULE_ERR;
+ }
+
+ RedisModuleTypeMethods tm = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = LazyFreeLinkRdbLoad,
+ .rdb_save = LazyFreeLinkRdbSave,
+ .aof_rewrite = LazyFreeLinkAofRewrite,
+ .free = LazyFreeLinkFree,
+ .free_effort = LazyFreeLinkFreeEffort,
+ .unlink = LazyFreeLinkUnlink,
+ };
+
+ LazyFreeLinkType = RedisModule_CreateDataType(ctx,"test_lazy",0,&tm);
+ if (LazyFreeLinkType == NULL) return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"lazyfreelink.insert",
+ LazyFreeLinkInsert_RedisCommand,"write deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"lazyfreelink.len",
+ LazyFreeLinkLen_RedisCommand,"readonly",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/testrdb.c b/tests/modules/testrdb.c
new file mode 100644
index 0000000..c31aebb
--- /dev/null
+++ b/tests/modules/testrdb.c
@@ -0,0 +1,405 @@
+#include "redismodule.h"
+
+#include <string.h>
+#include <assert.h>
+
+/* Module configuration: whether to save aux fields, and where. */
+#define CONF_AUX_OPTION_NO_AUX 0
+#define CONF_AUX_OPTION_SAVE2 1 << 0
+#define CONF_AUX_OPTION_BEFORE_KEYSPACE 1 << 1
+#define CONF_AUX_OPTION_AFTER_KEYSPACE 1 << 2
+#define CONF_AUX_OPTION_NO_DATA 1 << 3
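+/* The option bitmask is passed as the module's first load-time argument;
+ * e.g. (hypothetically) "loadmodule testrdb.so 3" selects SAVE2 together
+ * with BEFORE_KEYSPACE. */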
+long long conf_aux_count = 0;
+
+/* Registered type */
+RedisModuleType *testrdb_type = NULL;
+
+/* Global values to store and persist to aux */
+RedisModuleString *before_str = NULL;
+RedisModuleString *after_str = NULL;
+
+/* Temporary values that hold the aux data of an RDB being loaded (used during async_loading) */
+RedisModuleString *before_str_temp = NULL;
+RedisModuleString *after_str_temp = NULL;
+
+/* Indicates whether an async replica load is in progress.
+ * We update this value from RedisModuleEvent_ReplAsyncLoad events. */
+int async_loading = 0;
+
+int n_aux_load_called = 0;
+
+void replAsyncLoadCallback(RedisModuleCtx *ctx, RedisModuleEvent e, uint64_t sub, void *data)
+{
+ REDISMODULE_NOT_USED(e);
+ REDISMODULE_NOT_USED(data);
+
+ switch (sub) {
+ case REDISMODULE_SUBEVENT_REPL_ASYNC_LOAD_STARTED:
+ assert(async_loading == 0);
+ async_loading = 1;
+ break;
+ case REDISMODULE_SUBEVENT_REPL_ASYNC_LOAD_ABORTED:
+ /* Discard temp aux */
+ if (before_str_temp)
+ RedisModule_FreeString(ctx, before_str_temp);
+ if (after_str_temp)
+ RedisModule_FreeString(ctx, after_str_temp);
+ before_str_temp = NULL;
+ after_str_temp = NULL;
+
+ async_loading = 0;
+ break;
+ case REDISMODULE_SUBEVENT_REPL_ASYNC_LOAD_COMPLETED:
+ if (before_str)
+ RedisModule_FreeString(ctx, before_str);
+ if (after_str)
+ RedisModule_FreeString(ctx, after_str);
+ before_str = before_str_temp;
+ after_str = after_str_temp;
+
+ before_str_temp = NULL;
+ after_str_temp = NULL;
+
+ async_loading = 0;
+ break;
+ default:
+ assert(0);
+ }
+}
+
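+/* Type load callback: must read values of the same types and in the same
+ * order that testrdb_type_save wrote them (signed, string, float, long double). */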
+void *testrdb_type_load(RedisModuleIO *rdb, int encver) {
+ int count = RedisModule_LoadSigned(rdb);
+ RedisModuleString *str = RedisModule_LoadString(rdb);
+ float f = RedisModule_LoadFloat(rdb);
+ long double ld = RedisModule_LoadLongDouble(rdb);
+ if (RedisModule_IsIOError(rdb)) {
+ RedisModuleCtx *ctx = RedisModule_GetContextFromIO(rdb);
+ if (str)
+ RedisModule_FreeString(ctx, str);
+ return NULL;
+ }
+ /* Using the values only after checking for io errors. */
+ assert(count==1);
+ assert(encver==1);
+ assert(f==1.5f);
+ assert(ld==0.333333333333333333L);
+ return str;
+}
+
+void testrdb_type_save(RedisModuleIO *rdb, void *value) {
+ RedisModuleString *str = (RedisModuleString*)value;
+ RedisModule_SaveSigned(rdb, 1);
+ RedisModule_SaveString(rdb, str);
+ RedisModule_SaveFloat(rdb, 1.5);
+ RedisModule_SaveLongDouble(rdb, 0.333333333333333333L);
+}
+
+void testrdb_aux_save(RedisModuleIO *rdb, int when) {
+ if (!(conf_aux_count & CONF_AUX_OPTION_BEFORE_KEYSPACE)) assert(when == REDISMODULE_AUX_AFTER_RDB);
+ if (!(conf_aux_count & CONF_AUX_OPTION_AFTER_KEYSPACE)) assert(when == REDISMODULE_AUX_BEFORE_RDB);
+ assert(conf_aux_count!=CONF_AUX_OPTION_NO_AUX);
+ if (when == REDISMODULE_AUX_BEFORE_RDB) {
+ if (before_str) {
+ RedisModule_SaveSigned(rdb, 1);
+ RedisModule_SaveString(rdb, before_str);
+ } else {
+ RedisModule_SaveSigned(rdb, 0);
+ }
+ } else {
+ if (after_str) {
+ RedisModule_SaveSigned(rdb, 1);
+ RedisModule_SaveString(rdb, after_str);
+ } else {
+ RedisModule_SaveSigned(rdb, 0);
+ }
+ }
+}
+
+int testrdb_aux_load(RedisModuleIO *rdb, int encver, int when) {
+ assert(encver == 1);
+ if (!(conf_aux_count & CONF_AUX_OPTION_BEFORE_KEYSPACE)) assert(when == REDISMODULE_AUX_AFTER_RDB);
+ if (!(conf_aux_count & CONF_AUX_OPTION_AFTER_KEYSPACE)) assert(when == REDISMODULE_AUX_BEFORE_RDB);
+ assert(conf_aux_count!=CONF_AUX_OPTION_NO_AUX);
+ RedisModuleCtx *ctx = RedisModule_GetContextFromIO(rdb);
+ if (when == REDISMODULE_AUX_BEFORE_RDB) {
+ if (async_loading == 0) {
+ if (before_str)
+ RedisModule_FreeString(ctx, before_str);
+ before_str = NULL;
+ int count = RedisModule_LoadSigned(rdb);
+ if (RedisModule_IsIOError(rdb))
+ return REDISMODULE_ERR;
+ if (count)
+ before_str = RedisModule_LoadString(rdb);
+ } else {
+ if (before_str_temp)
+ RedisModule_FreeString(ctx, before_str_temp);
+ before_str_temp = NULL;
+ int count = RedisModule_LoadSigned(rdb);
+ if (RedisModule_IsIOError(rdb))
+ return REDISMODULE_ERR;
+ if (count)
+ before_str_temp = RedisModule_LoadString(rdb);
+ }
+ } else {
+ if (async_loading == 0) {
+ if (after_str)
+ RedisModule_FreeString(ctx, after_str);
+ after_str = NULL;
+ int count = RedisModule_LoadSigned(rdb);
+ if (RedisModule_IsIOError(rdb))
+ return REDISMODULE_ERR;
+ if (count)
+ after_str = RedisModule_LoadString(rdb);
+ } else {
+ if (after_str_temp)
+ RedisModule_FreeString(ctx, after_str_temp);
+ after_str_temp = NULL;
+ int count = RedisModule_LoadSigned(rdb);
+ if (RedisModule_IsIOError(rdb))
+ return REDISMODULE_ERR;
+ if (count)
+ after_str_temp = RedisModule_LoadString(rdb);
+ }
+ }
+
+ if (RedisModule_IsIOError(rdb))
+ return REDISMODULE_ERR;
+ return REDISMODULE_OK;
+}
+
+void testrdb_type_free(void *value) {
+ if (value)
+ RedisModule_FreeString(NULL, (RedisModuleString*)value);
+}
+
+int testrdb_set_before(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ if (before_str)
+ RedisModule_FreeString(ctx, before_str);
+ before_str = argv[1];
+ RedisModule_RetainString(ctx, argv[1]);
+ RedisModule_ReplyWithLongLong(ctx, 1);
+ return REDISMODULE_OK;
+}
+
+int testrdb_get_before(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ if (before_str)
+ RedisModule_ReplyWithString(ctx, before_str);
+ else
+ RedisModule_ReplyWithStringBuffer(ctx, "", 0);
+ return REDISMODULE_OK;
+}
+
+/* For purpose of testing module events, expose variable state during async_loading. */
+int testrdb_async_loading_get_before(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ if (before_str_temp)
+ RedisModule_ReplyWithString(ctx, before_str_temp);
+ else
+ RedisModule_ReplyWithStringBuffer(ctx, "", 0);
+ return REDISMODULE_OK;
+}
+
+int testrdb_set_after(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ if (after_str)
+ RedisModule_FreeString(ctx, after_str);
+ after_str = argv[1];
+ RedisModule_RetainString(ctx, argv[1]);
+ RedisModule_ReplyWithLongLong(ctx, 1);
+ return REDISMODULE_OK;
+}
+
+int testrdb_get_after(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ REDISMODULE_NOT_USED(argv);
+ if (argc != 1){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+ if (after_str)
+ RedisModule_ReplyWithString(ctx, after_str);
+ else
+ RedisModule_ReplyWithStringBuffer(ctx, "", 0);
+ return REDISMODULE_OK;
+}
+
+int testrdb_set_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 3){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_WRITE);
+ RedisModuleString *str = RedisModule_ModuleTypeGetValue(key);
+ if (str)
+ RedisModule_FreeString(ctx, str);
+ RedisModule_ModuleTypeSetValue(key, testrdb_type, argv[2]);
+ RedisModule_RetainString(ctx, argv[2]);
+ RedisModule_CloseKey(key);
+ RedisModule_ReplyWithLongLong(ctx, 1);
+ return REDISMODULE_OK;
+}
+
+int testrdb_get_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2){
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], REDISMODULE_READ);
+ RedisModuleString *str = RedisModule_ModuleTypeGetValue(key);
+ RedisModule_CloseKey(key);
+ RedisModule_ReplyWithString(ctx, str);
+ return REDISMODULE_OK;
+}
+
+int testrdb_get_n_aux_load_called(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+    REDISMODULE_NOT_USED(argv);
+    REDISMODULE_NOT_USED(argc);
+ RedisModule_ReplyWithLongLong(ctx, n_aux_load_called);
+ return REDISMODULE_OK;
+}
+
+int test2rdb_aux_load(RedisModuleIO *rdb, int encver, int when) {
+ REDISMODULE_NOT_USED(rdb);
+ REDISMODULE_NOT_USED(encver);
+ REDISMODULE_NOT_USED(when);
+ n_aux_load_called++;
+ return REDISMODULE_OK;
+}
+
+void test2rdb_aux_save(RedisModuleIO *rdb, int when) {
+ REDISMODULE_NOT_USED(rdb);
+ REDISMODULE_NOT_USED(when);
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx,"testrdb",1,REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModule_SetModuleOptions(ctx, REDISMODULE_OPTIONS_HANDLE_IO_ERRORS | REDISMODULE_OPTIONS_HANDLE_REPL_ASYNC_LOAD);
+
+ if (argc > 0)
+ RedisModule_StringToLongLong(argv[0], &conf_aux_count);
+
+ if (conf_aux_count==CONF_AUX_OPTION_NO_AUX) {
+ RedisModuleTypeMethods datatype_methods = {
+ .version = 1,
+ .rdb_load = testrdb_type_load,
+ .rdb_save = testrdb_type_save,
+ .aof_rewrite = NULL,
+ .digest = NULL,
+ .free = testrdb_type_free,
+ };
+
+ testrdb_type = RedisModule_CreateDataType(ctx, "test__rdb", 1, &datatype_methods);
+ if (testrdb_type == NULL)
+ return REDISMODULE_ERR;
+ } else if (!(conf_aux_count & CONF_AUX_OPTION_NO_DATA)) {
+ RedisModuleTypeMethods datatype_methods = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .rdb_load = testrdb_type_load,
+ .rdb_save = testrdb_type_save,
+ .aof_rewrite = NULL,
+ .digest = NULL,
+ .free = testrdb_type_free,
+ .aux_load = testrdb_aux_load,
+ .aux_save = testrdb_aux_save,
+ .aux_save_triggers = ((conf_aux_count & CONF_AUX_OPTION_BEFORE_KEYSPACE) ? REDISMODULE_AUX_BEFORE_RDB : 0) |
+ ((conf_aux_count & CONF_AUX_OPTION_AFTER_KEYSPACE) ? REDISMODULE_AUX_AFTER_RDB : 0)
+ };
+
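+        /* aux_save2, unlike aux_save, writes nothing at all to the RDB when
+         * the callback produces no data. */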
+ if (conf_aux_count & CONF_AUX_OPTION_SAVE2) {
+ datatype_methods.aux_save2 = testrdb_aux_save;
+ }
+
+ testrdb_type = RedisModule_CreateDataType(ctx, "test__rdb", 1, &datatype_methods);
+ if (testrdb_type == NULL)
+ return REDISMODULE_ERR;
+ } else {
+
+    /* Used to verify that the aux_save2 API saves nothing to the RDB when there is no data. */
+ RedisModuleTypeMethods datatype_methods = {
+ .version = REDISMODULE_TYPE_METHOD_VERSION,
+ .aux_load = test2rdb_aux_load,
+ .aux_save = test2rdb_aux_save,
+ .aux_save_triggers = ((conf_aux_count & CONF_AUX_OPTION_BEFORE_KEYSPACE) ? REDISMODULE_AUX_BEFORE_RDB : 0) |
+ ((conf_aux_count & CONF_AUX_OPTION_AFTER_KEYSPACE) ? REDISMODULE_AUX_AFTER_RDB : 0)
+ };
+ if (conf_aux_count & CONF_AUX_OPTION_SAVE2) {
+ datatype_methods.aux_save2 = test2rdb_aux_save;
+ }
+
+ RedisModule_CreateDataType(ctx, "test__rdb", 1, &datatype_methods);
+ }
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.set.before", testrdb_set_before,"deny-oom",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.get.before", testrdb_get_before,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.async_loading.get.before", testrdb_async_loading_get_before,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.set.after", testrdb_set_after,"deny-oom",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.get.after", testrdb_get_after,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.set.key", testrdb_set_key,"deny-oom",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.get.key", testrdb_get_key,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"testrdb.get.n_aux_load_called", testrdb_get_n_aux_load_called,"",1,1,1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ RedisModule_SubscribeToServerEvent(ctx,
+ RedisModuleEvent_ReplAsyncLoad, replAsyncLoadCallback);
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnUnload(RedisModuleCtx *ctx) {
+ if (before_str)
+ RedisModule_FreeString(ctx, before_str);
+ if (after_str)
+ RedisModule_FreeString(ctx, after_str);
+ if (before_str_temp)
+ RedisModule_FreeString(ctx, before_str_temp);
+ if (after_str_temp)
+ RedisModule_FreeString(ctx, after_str_temp);
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/timer.c b/tests/modules/timer.c
new file mode 100644
index 0000000..c9bd636
--- /dev/null
+++ b/tests/modules/timer.c
@@ -0,0 +1,102 @@
+
+#include "redismodule.h"
+
+static void timer_callback(RedisModuleCtx *ctx, void *data)
+{
+ RedisModuleString *keyname = data;
+ RedisModuleCallReply *reply;
+
+ reply = RedisModule_Call(ctx, "INCR", "s", keyname);
+ if (reply != NULL)
+ RedisModule_FreeCallReply(reply);
+ RedisModule_FreeString(ctx, keyname);
+}
+
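+/* Example usage (hypothetical key name):
+ *   TEST.CREATETIMER 100 mykey   - INCR "mykey" after ~100 ms; replies with the timer id
+ *   TEST.GETTIMER <id>           - replies [keyname, remaining-ms], or null if unknown
+ *   TEST.STOPTIMER <id>          - replies 1 if the timer was stopped, 0 otherwise */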
+int test_createtimer(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 3) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long period;
+ if (RedisModule_StringToLongLong(argv[1], &period) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx, "Invalid time specified.");
+ return REDISMODULE_OK;
+ }
+
+ RedisModuleString *keyname = argv[2];
+ RedisModule_RetainString(ctx, keyname);
+
+ RedisModuleTimerID id = RedisModule_CreateTimer(ctx, period, timer_callback, keyname);
+ RedisModule_ReplyWithLongLong(ctx, id);
+
+ return REDISMODULE_OK;
+}
+
+int test_gettimer(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long id;
+ if (RedisModule_StringToLongLong(argv[1], &id) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx, "Invalid id specified.");
+ return REDISMODULE_OK;
+ }
+
+ uint64_t remaining;
+ RedisModuleString *keyname;
+ if (RedisModule_GetTimerInfo(ctx, id, &remaining, (void **)&keyname) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithNull(ctx);
+ } else {
+ RedisModule_ReplyWithArray(ctx, 2);
+ RedisModule_ReplyWithString(ctx, keyname);
+ RedisModule_ReplyWithLongLong(ctx, remaining);
+ }
+
+ return REDISMODULE_OK;
+}
+
+int test_stoptimer(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ if (argc != 2) {
+ RedisModule_WrongArity(ctx);
+ return REDISMODULE_OK;
+ }
+
+ long long id;
+ if (RedisModule_StringToLongLong(argv[1], &id) == REDISMODULE_ERR) {
+ RedisModule_ReplyWithError(ctx, "Invalid id specified.");
+ return REDISMODULE_OK;
+ }
+
+ int ret = 0;
+ RedisModuleString *keyname;
+ if (RedisModule_StopTimer(ctx, id, (void **) &keyname) == REDISMODULE_OK) {
+ RedisModule_FreeString(ctx, keyname);
+ ret = 1;
+ }
+
+ RedisModule_ReplyWithLongLong(ctx, ret);
+ return REDISMODULE_OK;
+}
+
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx,"timer",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"test.createtimer", test_createtimer,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.gettimer", test_gettimer,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+ if (RedisModule_CreateCommand(ctx,"test.stoptimer", test_stoptimer,"",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/usercall.c b/tests/modules/usercall.c
new file mode 100644
index 0000000..6b23974
--- /dev/null
+++ b/tests/modules/usercall.c
@@ -0,0 +1,228 @@
+#include "redismodule.h"
+#include <pthread.h>
+#include <assert.h>
+
+#define UNUSED(V) ((void) V)
+
+RedisModuleUser *user = NULL;
+
+int call_without_user(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 2) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ const char *cmd = RedisModule_StringPtrLen(argv[1], NULL);
+
+ RedisModuleCallReply *rep = RedisModule_Call(ctx, cmd, "Ev", argv + 2, argc - 2);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ return REDISMODULE_OK;
+}
+
+int call_with_user_flag(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc < 3) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ RedisModule_SetContextUser(ctx, user);
+
+ /* Append Ev to the provided flags. */
+ RedisModuleString *flags = RedisModule_CreateStringFromString(ctx, argv[1]);
+ RedisModule_StringAppendBuffer(ctx, flags, "Ev", 2);
+
+ const char* flg = RedisModule_StringPtrLen(flags, NULL);
+ const char* cmd = RedisModule_StringPtrLen(argv[2], NULL);
+
+ RedisModuleCallReply* rep = RedisModule_Call(ctx, cmd, flg, argv + 3, argc - 3);
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+ RedisModule_FreeString(ctx, flags);
+
+ return REDISMODULE_OK;
+}
+
+int add_to_acl(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 2) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ size_t acl_len;
+ const char *acl = RedisModule_StringPtrLen(argv[1], &acl_len);
+
+ RedisModuleString *error;
+ int ret = RedisModule_SetModuleUserACLString(ctx, user, acl, &error);
+ if (ret) {
+ size_t len;
+ const char * e = RedisModule_StringPtrLen(error, &len);
+ RedisModule_ReplyWithError(ctx, e);
+ return REDISMODULE_OK;
+ }
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+ return REDISMODULE_OK;
+}
+
+int get_acl(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+
+ if (argc != 1) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ RedisModule_Assert(user != NULL);
+
+ RedisModuleString *acl = RedisModule_GetModuleUserACLString(user);
+
+ RedisModule_ReplyWithString(ctx, acl);
+
+ RedisModule_FreeString(NULL, acl);
+
+ return REDISMODULE_OK;
+}
+
+int reset_user(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+
+ if (argc != 1) {
+ return RedisModule_WrongArity(ctx);
+ }
+
+ if (user != NULL) {
+ RedisModule_FreeModuleUser(user);
+ }
+
+ user = RedisModule_CreateModuleUser("module_user");
+
+ RedisModule_ReplyWithSimpleString(ctx, "OK");
+
+ return REDISMODULE_OK;
+}
+
+typedef struct {
+ RedisModuleString **argv;
+ int argc;
+ RedisModuleBlockedClient *bc;
+} bg_call_data;
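+/* Background-call pattern: the command handler blocks the client, copies its
+ * arguments, and spawns a thread. The worker acquires the GIL through a
+ * thread-safe context, runs the command as the module user, replies, unblocks
+ * the client and frees the copied arguments. */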
+
+void *bg_call_worker(void *arg) {
+ bg_call_data *bg = arg;
+
+ // Get Redis module context
+ RedisModuleCtx *ctx = RedisModule_GetThreadSafeContext(bg->bc);
+
+ // Acquire GIL
+ RedisModule_ThreadSafeContextLock(ctx);
+
+ // Set user
+ RedisModule_SetContextUser(ctx, user);
+
+ // Call the command
+ size_t format_len;
+ RedisModuleString *format_redis_str = RedisModule_CreateString(NULL, "v", 1);
+ const char *format = RedisModule_StringPtrLen(bg->argv[1], &format_len);
+ RedisModule_StringAppendBuffer(NULL, format_redis_str, format, format_len);
+ RedisModule_StringAppendBuffer(NULL, format_redis_str, "E", 1);
+ format = RedisModule_StringPtrLen(format_redis_str, NULL);
+ const char *cmd = RedisModule_StringPtrLen(bg->argv[2], NULL);
+ RedisModuleCallReply *rep = RedisModule_Call(ctx, cmd, format, bg->argv + 3, bg->argc - 3);
+ RedisModule_FreeString(NULL, format_redis_str);
+
+ // Release GIL
+ RedisModule_ThreadSafeContextUnlock(ctx);
+
+ // Reply to client
+ if (!rep) {
+ RedisModule_ReplyWithError(ctx, "NULL reply returned");
+ } else {
+ RedisModule_ReplyWithCallReply(ctx, rep);
+ RedisModule_FreeCallReply(rep);
+ }
+
+ // Unblock client
+ RedisModule_UnblockClient(bg->bc, NULL);
+
+ /* Free the arguments */
+ for (int i=0; i<bg->argc; i++)
+ RedisModule_FreeString(ctx, bg->argv[i]);
+ RedisModule_Free(bg->argv);
+ RedisModule_Free(bg);
+
+ // Free the Redis module context
+ RedisModule_FreeThreadSafeContext(ctx);
+
+ return NULL;
+}
+
+int call_with_user_bg(RedisModuleCtx *ctx, RedisModuleString **argv, int argc)
+{
+ UNUSED(argv);
+ UNUSED(argc);
+
+ /* Make sure we're not trying to block a client when we shouldn't */
+ int flags = RedisModule_GetContextFlags(ctx);
+ int allFlags = RedisModule_GetContextFlagsAll();
+ if ((allFlags & REDISMODULE_CTX_FLAGS_MULTI) &&
+ (flags & REDISMODULE_CTX_FLAGS_MULTI)) {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked client is not supported inside multi");
+ return REDISMODULE_OK;
+ }
+ if ((allFlags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING) &&
+ (flags & REDISMODULE_CTX_FLAGS_DENY_BLOCKING)) {
+ RedisModule_ReplyWithSimpleString(ctx, "Blocked client is not allowed");
+ return REDISMODULE_OK;
+ }
+
+    /* Copy the arguments (holding a reference to each string so they outlive
+     * this handler) and pass them to the thread. */
+ bg_call_data *bg = RedisModule_Alloc(sizeof(bg_call_data));
+ bg->argv = RedisModule_Alloc(sizeof(RedisModuleString*)*argc);
+ bg->argc = argc;
+ for (int i=0; i<argc; i++)
+ bg->argv[i] = RedisModule_HoldString(ctx, argv[i]);
+
+ /* Block the client */
+ bg->bc = RedisModule_BlockClient(ctx, NULL, NULL, NULL, 0);
+
+ /* Start a thread to handle the request */
+ pthread_t tid;
+ int res = pthread_create(&tid, NULL, bg_call_worker, bg);
+ assert(res == 0);
+
+ return REDISMODULE_OK;
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+
+ if (RedisModule_Init(ctx,"usercall",1,REDISMODULE_APIVER_1)== REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"usercall.call_without_user", call_without_user,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"usercall.call_with_user_flag", call_with_user_flag,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "usercall.call_with_user_bg", call_with_user_bg, "write", 0, 0, 0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "usercall.add_to_acl", add_to_acl, "write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"usercall.reset_user", reset_user,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx,"usercall.get_acl", get_acl,"write",0,0,0) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/modules/zset.c b/tests/modules/zset.c
new file mode 100644
index 0000000..13f2ab3
--- /dev/null
+++ b/tests/modules/zset.c
@@ -0,0 +1,91 @@
+#include "redismodule.h"
+#include <math.h>
+#include <errno.h>
+#include <stdlib.h> /* strtod() */
+
+/* ZSET.REM key element
+ *
+ * Removes an occurrence of an element from a sorted set. Replies with the
+ * number of removed elements (0 or 1).
+ */
+int zset_rem(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 3) return RedisModule_WrongArity(ctx);
+ RedisModule_AutoMemory(ctx);
+ int keymode = REDISMODULE_READ | REDISMODULE_WRITE;
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], keymode);
+ int deleted;
+ if (RedisModule_ZsetRem(key, argv[2], &deleted) == REDISMODULE_OK)
+ return RedisModule_ReplyWithLongLong(ctx, deleted);
+ else
+ return RedisModule_ReplyWithError(ctx, "ERR ZsetRem failed");
+}
+
+/* ZSET.ADD key score member
+ *
+ * Adds a specified member with the specified score to the sorted
+ * set stored at key.
+ */
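+/* Example (hypothetical key and member): ZSET.ADD myzset 1.5 a -> +OK,
+ * after which ZSET.INCRBY myzset a 2.5 replies with the new score 4. */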
+int zset_add(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) return RedisModule_WrongArity(ctx);
+ RedisModule_AutoMemory(ctx);
+ int keymode = REDISMODULE_READ | REDISMODULE_WRITE;
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], keymode);
+
+ size_t len;
+ double score;
+ char *endptr;
+ const char *str = RedisModule_StringPtrLen(argv[2], &len);
+    errno = 0; /* strtod() sets errno only on range errors; clear it first. */
+    score = strtod(str, &endptr);
+ if (*endptr != '\0' || errno == ERANGE)
+ return RedisModule_ReplyWithError(ctx, "value is not a valid float");
+
+ if (RedisModule_ZsetAdd(key, score, argv[3], NULL) == REDISMODULE_OK)
+ return RedisModule_ReplyWithSimpleString(ctx, "OK");
+ else
+ return RedisModule_ReplyWithError(ctx, "ERR ZsetAdd failed");
+}
+
+/* ZSET.INCRBY key member increment
+ *
+ * Increments the score stored at member in the sorted set stored at key by increment.
+ * Replies with the new score of this element.
+ */
+int zset_incrby(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ if (argc != 4) return RedisModule_WrongArity(ctx);
+ RedisModule_AutoMemory(ctx);
+ int keymode = REDISMODULE_READ | REDISMODULE_WRITE;
+ RedisModuleKey *key = RedisModule_OpenKey(ctx, argv[1], keymode);
+
+ size_t len;
+ double score, newscore;
+ char *endptr;
+ const char *str = RedisModule_StringPtrLen(argv[3], &len);
+    errno = 0; /* strtod() sets errno only on range errors; clear it first. */
+    score = strtod(str, &endptr);
+ if (*endptr != '\0' || errno == ERANGE)
+ return RedisModule_ReplyWithError(ctx, "value is not a valid float");
+
+ if (RedisModule_ZsetIncrby(key, score, argv[2], NULL, &newscore) == REDISMODULE_OK)
+ return RedisModule_ReplyWithDouble(ctx, newscore);
+ else
+ return RedisModule_ReplyWithError(ctx, "ERR ZsetIncrby failed");
+}
+
+int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) {
+ REDISMODULE_NOT_USED(argv);
+ REDISMODULE_NOT_USED(argc);
+ if (RedisModule_Init(ctx, "zset", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "zset.rem", zset_rem, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "zset.add", zset_add, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ if (RedisModule_CreateCommand(ctx, "zset.incrby", zset_incrby, "write",
+ 1, 1, 1) == REDISMODULE_ERR)
+ return REDISMODULE_ERR;
+
+ return REDISMODULE_OK;
+}
diff --git a/tests/sentinel/run.tcl b/tests/sentinel/run.tcl
new file mode 100644
index 0000000..6d3db32
--- /dev/null
+++ b/tests/sentinel/run.tcl
@@ -0,0 +1,36 @@
+# Sentinel test suite. Copyright (C) 2014 Salvatore Sanfilippo antirez@gmail.com
+# This software is released under the BSD License. See the COPYING file for
+# more information.
+
+cd tests/sentinel
+source ../instances.tcl
+
+set ::instances_count 5 ; # How many instances we use at max.
+set ::tlsdir "../../tls"
+
+proc main {} {
+ parse_options
+ if {$::leaked_fds_file != ""} {
+ set ::env(LEAKED_FDS_FILE) $::leaked_fds_file
+ }
+ spawn_instance sentinel $::sentinel_base_port $::instances_count {
+ "sentinel deny-scripts-reconfig no"
+ "enable-protected-configs yes"
+ "enable-debug-command yes"
+ } "../tests/includes/sentinel.conf"
+
+ spawn_instance redis $::redis_base_port $::instances_count {
+ "enable-protected-configs yes"
+ "enable-debug-command yes"
+ "save ''"
+ }
+ run_tests
+ cleanup
+ end_tests
+}
+
+if {[catch main e]} {
+ puts $::errorInfo
+ cleanup
+ exit 1
+}
diff --git a/tests/sentinel/tests/00-base.tcl b/tests/sentinel/tests/00-base.tcl
new file mode 100644
index 0000000..7b64395
--- /dev/null
+++ b/tests/sentinel/tests/00-base.tcl
@@ -0,0 +1,210 @@
+# Check the basic monitoring and failover capabilities.
+source "../tests/includes/start-init-tests.tcl"
+source "../tests/includes/init-tests.tcl"
+
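+# Speed up failure detection for this suite: lower the down-after time to
+# 1000 ms (the production default is 30000 ms).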
+foreach_sentinel_id id {
+ S $id sentinel debug default-down-after 1000
+}
+
+if {$::simulate_error} {
+ test "This test will fail" {
+ fail "Simulated error"
+ }
+}
+
+test "Sentinel command flag infrastructure works correctly" {
+ foreach_sentinel_id id {
+ set command_list [S $id command list]
+
+ foreach cmd {ping info subscribe client|setinfo} {
+ assert_not_equal [S $id command docs $cmd] {}
+ assert_not_equal [lsearch $command_list $cmd] -1
+ }
+
+ foreach cmd {save bgrewriteaof blpop replicaof} {
+ assert_equal [S $id command docs $cmd] {}
+ assert_equal [lsearch $command_list $cmd] -1
+ assert_error {ERR unknown command*} {S $id $cmd}
+ }
+
+ assert_error {ERR unknown subcommand*} {S $id client no-touch}
+ }
+}
+
+test "SENTINEL HELP output the sentinel subcommand help" {
+ assert_match "*SENTINEL <subcommand> *" [S 0 SENTINEL HELP]
+}
+
+test "SENTINEL MYID return the sentinel instance ID" {
+ assert_equal 40 [string length [S 0 SENTINEL MYID]]
+ assert_equal [S 0 SENTINEL MYID] [S 0 SENTINEL MYID]
+}
+
+test "SENTINEL INFO CACHE returns the cached info" {
+ set res [S 0 SENTINEL INFO-CACHE mymaster]
+ assert_morethan_equal [llength $res] 2
+ assert_equal "mymaster" [lindex $res 0]
+
+ set res [lindex $res 1]
+ assert_morethan_equal [llength $res] 2
+ assert_morethan [lindex $res 0] 0
+ assert_match "*# Server*" [lindex $res 1]
+}
+
+test "SENTINEL PENDING-SCRIPTS returns the information about pending scripts" {
+ # may or may not have a value, so assert greater than or equal to 0.
+ assert_morethan_equal [llength [S 0 SENTINEL PENDING-SCRIPTS]] 0
+}
+
+test "SENTINEL MASTERS returns a list of monitored masters" {
+ assert_match "*mymaster*" [S 0 SENTINEL MASTERS]
+ assert_morethan_equal [llength [S 0 SENTINEL MASTERS]] 1
+}
+
+test "SENTINEL SENTINELS returns a list of sentinel instances" {
+ assert_morethan_equal [llength [S 0 SENTINEL SENTINELS mymaster]] 1
+}
+
+test "SENTINEL SLAVES returns a list of the monitored replicas" {
+ assert_morethan_equal [llength [S 0 SENTINEL SLAVES mymaster]] 1
+}
+
+test "SENTINEL SIMULATE-FAILURE HELP list supported flags" {
+ set res [S 0 SENTINEL SIMULATE-FAILURE HELP]
+ assert_morethan_equal [llength $res] 2
+ assert_equal {crash-after-election crash-after-promotion} $res
+}
+
+test "Basic failover works if the master is down" {
+ set old_port [RPort $master_id]
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+ kill_instance redis $master_id
+ foreach_sentinel_id id {
+ S $id sentinel debug ping-period 500
+ S $id sentinel debug ask-period 500
+ wait_for_condition 1000 100 {
+ [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "At least one Sentinel did not receive failover info"
+ }
+ }
+ restart_instance redis $master_id
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $addr 1]]
+}
+
+test "New master [join $addr {:}] role matches" {
+ assert {[RI $master_id role] eq {master}}
+}
+
+test "All the other slaves now point to the new master" {
+ foreach_redis_id id {
+ if {$id != $master_id && $id != 0} {
+ wait_for_condition 1000 50 {
+ [RI $id master_port] == [lindex $addr 1]
+ } else {
+ fail "Redis ID $id not configured to replicate with new master"
+ }
+ }
+ }
+}
+
+test "The old master eventually gets reconfigured as a slave" {
+ wait_for_condition 1000 50 {
+ [RI 0 master_port] == [lindex $addr 1]
+ } else {
+ fail "Old master not reconfigured as slave of new master"
+ }
+}
+
+test "ODOWN is not possible without N (quorum) Sentinels reports" {
+ foreach_sentinel_id id {
+ S $id SENTINEL SET mymaster quorum [expr $sentinels+1]
+ }
+ set old_port [RPort $master_id]
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+ kill_instance redis $master_id
+
+    # Make sure failover did not happen.
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+ restart_instance redis $master_id
+}
+
+test "Failover is not possible without majority agreement" {
+ foreach_sentinel_id id {
+ S $id SENTINEL SET mymaster quorum $quorum
+ }
+
+ # Crash majority of sentinels
+ for {set id 0} {$id < $quorum} {incr id} {
+ kill_instance sentinel $id
+ }
+
+ # Kill the current master
+ kill_instance redis $master_id
+
+    # Make sure failover did not happen.
+ set addr [S $quorum SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+ restart_instance redis $master_id
+
+ # Cleanup: restart Sentinels to monitor the master.
+ for {set id 0} {$id < $quorum} {incr id} {
+ restart_instance sentinel $id
+ }
+}
+
+test "Failover works if we configure for absolute agreement" {
+ foreach_sentinel_id id {
+ S $id SENTINEL SET mymaster quorum $sentinels
+ }
+
+ # Wait for Sentinels to monitor the master again
+ foreach_sentinel_id id {
+ wait_for_condition 1000 100 {
+ [dict get [S $id SENTINEL MASTER mymaster] info-refresh] < 100000
+ } else {
+ fail "At least one Sentinel is not monitoring the master"
+ }
+ }
+
+ kill_instance redis $master_id
+
+ foreach_sentinel_id id {
+ wait_for_condition 1000 100 {
+ [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "At least one Sentinel did not receive failover info"
+ }
+ }
+ restart_instance redis $master_id
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $addr 1]]
+
+ # Set the min ODOWN agreement back to strict majority.
+ foreach_sentinel_id id {
+ S $id SENTINEL SET mymaster quorum $quorum
+ }
+}
+
+test "New master [join $addr {:}] role matches" {
+ assert {[RI $master_id role] eq {master}}
+}
+
+test "SENTINEL RESET can resets the master" {
+ # After SENTINEL RESET, sometimes the sentinel can sense the master again,
+ # causing the test to fail. Here we give it a few more chances.
+ for {set j 0} {$j < 10} {incr j} {
+ assert_equal 1 [S 0 SENTINEL RESET mymaster]
+ set res1 [llength [S 0 SENTINEL SENTINELS mymaster]]
+ set res2 [llength [S 0 SENTINEL SLAVES mymaster]]
+ set res3 [llength [S 0 SENTINEL REPLICAS mymaster]]
+ if {$res1 eq 0 && $res2 eq 0 && $res3 eq 0} break
+ }
+ assert_equal 0 $res1
+ assert_equal 0 $res2
+ assert_equal 0 $res3
+}
diff --git a/tests/sentinel/tests/01-conf-update.tcl b/tests/sentinel/tests/01-conf-update.tcl
new file mode 100644
index 0000000..fe29bb0
--- /dev/null
+++ b/tests/sentinel/tests/01-conf-update.tcl
@@ -0,0 +1,50 @@
+# Test Sentinel configuration consistency after partitions heal.
+
+source "../tests/includes/init-tests.tcl"
+
+test "We can failover with Sentinel 1 crashed" {
+ set old_port [RPort $master_id]
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+
+ # Crash Sentinel 1
+ kill_instance sentinel 1
+
+ kill_instance redis $master_id
+ foreach_sentinel_id id {
+ if {$id != 1} {
+ wait_for_condition 1000 50 {
+ [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "Sentinel $id did not receive failover info"
+ }
+ }
+ }
+ restart_instance redis $master_id
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $addr 1]]
+}
+
+test "After Sentinel 1 is restarted, its config gets updated" {
+ restart_instance sentinel 1
+ wait_for_condition 1000 50 {
+ [lindex [S 1 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "Restarted Sentinel did not receive failover info"
+ }
+}
+
+test "New master [join $addr {:}] role matches" {
+ assert {[RI $master_id role] eq {master}}
+}
+
+test "Update log level" {
+ set current_loglevel [S 0 SENTINEL CONFIG GET loglevel]
+ assert {[lindex $current_loglevel 1] == {notice}}
+
+ foreach {loglevel} {debug verbose notice warning nothing} {
+ S 0 SENTINEL CONFIG SET loglevel $loglevel
+ set updated_loglevel [S 0 SENTINEL CONFIG GET loglevel]
+ assert {[lindex $updated_loglevel 1] == $loglevel}
+ }
+}
diff --git a/tests/sentinel/tests/02-slaves-reconf.tcl b/tests/sentinel/tests/02-slaves-reconf.tcl
new file mode 100644
index 0000000..8196b60
--- /dev/null
+++ b/tests/sentinel/tests/02-slaves-reconf.tcl
@@ -0,0 +1,91 @@
+# Check that slaves are reconfigured at a later time if they are partitioned.
+#
+# Here we should test:
+# 1) That slaves point to the new master after failover.
+# 2) That partitioned slaves point to the new master when they were
+#    partitioned away during failover and return at a later time.
+
+source "../tests/includes/init-tests.tcl"
+
+proc 02_test_slaves_replication {} {
+ uplevel 1 {
+ test "Check that slaves replicate from current master" {
+ set master_port [RPort $master_id]
+ foreach_redis_id id {
+ if {$id == $master_id} continue
+ if {[instance_is_killed redis $id]} continue
+ wait_for_condition 1000 50 {
+ ([RI $id master_port] == $master_port) &&
+ ([RI $id master_link_status] eq {up})
+ } else {
+ fail "Redis slave $id is replicating from wrong master"
+ }
+ }
+ }
+ }
+}
+
+proc 02_crash_and_failover {} {
+ uplevel 1 {
+ test "Crash the master and force a failover" {
+ set old_port [RPort $master_id]
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+ kill_instance redis $master_id
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "At least one Sentinel did not receive failover info"
+ }
+ }
+ restart_instance redis $master_id
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $addr 1]]
+ }
+ }
+}
+
+02_test_slaves_replication
+02_crash_and_failover
+
+foreach_sentinel_id id {
+ S $id sentinel debug info-period 100
+ S $id sentinel debug default-down-after 1000
+ S $id sentinel debug publish-period 100
+}
+
+02_test_slaves_replication
+
+test "Kill a slave instance" {
+ foreach_redis_id id {
+ if {$id == $master_id} continue
+ set killed_slave_id $id
+ kill_instance redis $id
+ break
+ }
+}
+
+02_crash_and_failover
+02_test_slaves_replication
+
+test "Wait for failover to end" {
+ set inprogress 1
+ while {$inprogress} {
+ set inprogress 0
+ foreach_sentinel_id id {
+ if {[dict exists [S $id SENTINEL MASTER mymaster] failover-state]} {
+ incr inprogress
+ }
+ }
+ if {$inprogress} {after 100}
+ }
+}
+
+test "Restart killed slave and test replication of slaves again..." {
+ restart_instance redis $killed_slave_id
+}
+
+# Now we check that the slave rejoining the partition is reconfigured even
+# after the failover has finished.
+02_test_slaves_replication
diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl
new file mode 100644
index 0000000..bd6eecc
--- /dev/null
+++ b/tests/sentinel/tests/03-runtime-reconf.tcl
@@ -0,0 +1,225 @@
+# Test runtime reconfiguration command SENTINEL SET.
+source "../tests/includes/init-tests.tcl"
+set num_sentinels [llength $::sentinel_instances]
+
+set ::user "testuser"
+set ::password "secret"
+
+proc server_set_password {} {
+ foreach_redis_id id {
+ assert_equal {OK} [R $id CONFIG SET requirepass $::password]
+ assert_equal {OK} [R $id AUTH $::password]
+ assert_equal {OK} [R $id CONFIG SET masterauth $::password]
+ }
+}
+
+proc server_reset_password {} {
+ foreach_redis_id id {
+ assert_equal {OK} [R $id CONFIG SET requirepass ""]
+ assert_equal {OK} [R $id CONFIG SET masterauth ""]
+ }
+}
+
+proc server_set_acl {id} {
+ assert_equal {OK} [R $id ACL SETUSER $::user on >$::password allchannels +@all]
+ assert_equal {OK} [R $id ACL SETUSER default off]
+
+ R $id CLIENT KILL USER default SKIPME no
+ assert_equal {OK} [R $id AUTH $::user $::password]
+ assert_equal {OK} [R $id CONFIG SET masteruser $::user]
+ assert_equal {OK} [R $id CONFIG SET masterauth $::password]
+}
+
+proc server_reset_acl {id} {
+ assert_equal {OK} [R $id ACL SETUSER default on]
+ assert_equal {1} [R $id ACL DELUSER $::user]
+
+ assert_equal {OK} [R $id CONFIG SET masteruser ""]
+ assert_equal {OK} [R $id CONFIG SET masterauth ""]
+}
+
+proc verify_sentinel_connect_replicas {id} {
+ foreach replica [S $id SENTINEL REPLICAS mymaster] {
+ if {[string match "*disconnected*" [dict get $replica flags]]} {
+ return 0
+ }
+ }
+ return 1
+}
+
+proc wait_for_sentinels_connect_servers { {is_connect 1} } {
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [string match "*disconnected*" [dict get [S $id SENTINEL MASTER mymaster] flags]] != $is_connect
+ } else {
+ fail "At least some sentinel can't connect to master"
+ }
+
+ wait_for_condition 1000 50 {
+ [verify_sentinel_connect_replicas $id] == $is_connect
+ } else {
+ fail "At least some sentinel can't connect to replica"
+ }
+ }
+}
+
+test "Sentinels (re)connection following SENTINEL SET mymaster auth-pass" {
+    # Three types of sentinels to test:
+    # (re)started while the master changed its password; manages to connect
+    # only after the new password is set on it
+    set sent2re 0
+    # (up)dated in advance with the master's new password
+    set sent2up 1
+    # (un)touched; nevertheless manages to maintain its (old) connection
+    set sent2un 2
+
+ wait_for_sentinels_connect_servers
+ kill_instance sentinel $sent2re
+ server_set_password
+ assert_equal {OK} [S $sent2up SENTINEL SET mymaster auth-pass $::password]
+ restart_instance sentinel $sent2re
+
+ # Verify sentinel that restarted failed to connect master
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]] != 0
+ } else {
+ fail "Expected to be disconnected from master due to wrong password"
+ }
+
+ # Update restarted sentinel with master password
+ assert_equal {OK} [S $sent2re SENTINEL SET mymaster auth-pass $::password]
+
+ # All sentinels expected to connect successfully
+ wait_for_sentinels_connect_servers
+
+ # remove requirepass and verify sentinels manage to connect servers
+ server_reset_password
+ wait_for_sentinels_connect_servers
+ # Sanity check
+ verify_sentinel_auto_discovery
+}
+
+test "Sentinels (re)connection following master ACL change" {
+    # Three types of sentinels to test during the ACL change:
+    # 1. (re)started sentinel: manages to connect only after the new password is set
+    # 2. (up)dated sentinel: receives the new password just before the ACL change
+    # 3. (un)touched sentinel: keeps its old connection to the master, but since
+    #    it never got the new ACL password, the change does not persist for it
+    #    (unlike legacy auth-pass)
+ set sent2re 0
+ set sent2up 1
+ set sent2un 2
+
+ wait_for_sentinels_connect_servers
+ # kill sentinel 'sent2re' and restart it after ACL change
+ kill_instance sentinel $sent2re
+
+ # Update sentinel 'sent2up' with new user and pwd
+ assert_equal {OK} [S $sent2up SENTINEL SET mymaster auth-user $::user]
+ assert_equal {OK} [S $sent2up SENTINEL SET mymaster auth-pass $::password]
+
+ foreach_redis_id id {
+ server_set_acl $id
+ }
+
+ restart_instance sentinel $sent2re
+
+    # Verify the restarted sentinel fails to reconnect to the master
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2re SENTINEL MASTER mymaster] flags]] != 0
+ } else {
+ fail "Expected: Restarted sentinel to be disconnected from master due to obsolete password"
+ }
+
+    # Verify the sentinel with the updated password manages to connect (wait for sentinelTimer to reconnect)
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2up SENTINEL MASTER mymaster] flags]] == 0
+ } else {
+ fail "Expected: Sentinel to be connected to master"
+ }
+
+    # Verify the untouched sentinel fails to connect to the master
+ wait_for_condition 100 50 {
+ [string match "*disconnected*" [dict get [S $sent2un SENTINEL MASTER mymaster] flags]] != 0
+ } else {
+ fail "Expected: Sentinel to be disconnected from master due to obsolete password"
+ }
+
+ # Now update all sentinels with new password
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id SENTINEL SET mymaster auth-user $::user]
+ assert_equal {OK} [S $id SENTINEL SET mymaster auth-pass $::password]
+ }
+
+ # All sentinels expected to connect successfully
+ wait_for_sentinels_connect_servers
+
+    # Remove the ACL user and verify sentinels manage to connect to the servers
+ foreach_redis_id id {
+ server_reset_acl $id
+ }
+
+ wait_for_sentinels_connect_servers
+ # Sanity check
+ verify_sentinel_auto_discovery
+}
+
+test "Set parameters in normal case" {
+
+ set info [S 0 SENTINEL master mymaster]
+ set origin_quorum [dict get $info quorum]
+ set origin_down_after_milliseconds [dict get $info down-after-milliseconds]
+ set update_quorum [expr $origin_quorum+1]
+ set update_down_after_milliseconds [expr $origin_down_after_milliseconds+1000]
+
+ assert_equal [S 0 SENTINEL SET mymaster quorum $update_quorum] "OK"
+ assert_equal [S 0 SENTINEL SET mymaster down-after-milliseconds $update_down_after_milliseconds] "OK"
+
+ set update_info [S 0 SENTINEL master mymaster]
+ assert {[dict get $update_info quorum] != $origin_quorum}
+ assert {[dict get $update_info down-after-milliseconds] != $origin_down_after_milliseconds}
+
+    # Restore the original config parameters
+ assert_equal [S 0 SENTINEL SET mymaster quorum $origin_quorum] "OK"
+ assert_equal [S 0 SENTINEL SET mymaster down-after-milliseconds $origin_down_after_milliseconds] "OK"
+}
+
+test "Set parameters in normal case with bad format" {
+
+ set info [S 0 SENTINEL master mymaster]
+ set origin_down_after_milliseconds [dict get $info down-after-milliseconds]
+
+ assert_error "ERR Invalid argument '-20' for SENTINEL SET 'down-after-milliseconds'*" {S 0 SENTINEL SET mymaster down-after-milliseconds -20}
+ assert_error "ERR Invalid argument 'abc' for SENTINEL SET 'down-after-milliseconds'*" {S 0 SENTINEL SET mymaster down-after-milliseconds "abc"}
+
+ set current_info [S 0 SENTINEL master mymaster]
+ assert {[dict get $current_info down-after-milliseconds] == $origin_down_after_milliseconds}
+}
+
+test "Sentinel Set with other error situations" {
+
+ # non-existing script
+ assert_error "ERR Notification script seems non existing*" {S 0 SENTINEL SET mymaster notification-script test.txt}
+
+ # wrong parameter number
+ assert_error "ERR wrong number of arguments for 'sentinel|set' command" {S 0 SENTINEL SET mymaster fakeoption}
+
+ # unknown parameter option
+ assert_error "ERR Unknown option or number of arguments for SENTINEL SET 'fakeoption'" {S 0 SENTINEL SET mymaster fakeoption fakevalue}
+
+ # failure to save the new config to disk
+ set info [S 0 SENTINEL master mymaster]
+ set origin_quorum [dict get $info quorum]
+ set update_quorum [expr $origin_quorum+1]
+ set sentinel_id 0
+ set configfilename [file join "sentinel_$sentinel_id" "sentinel.conf"]
+ set configfilename_bak [file join "sentinel_$sentinel_id" "sentinel.conf.bak"]
+
+ file rename $configfilename $configfilename_bak
+ file mkdir $configfilename
+
+ catch {[S 0 SENTINEL SET mymaster quorum $update_quorum]} err
+
+ file delete $configfilename
+ file rename $configfilename_bak $configfilename
+
+ assert_match "ERR Failed to save config file*" $err
+}
diff --git a/tests/sentinel/tests/04-slave-selection.tcl b/tests/sentinel/tests/04-slave-selection.tcl
new file mode 100644
index 0000000..3d2ca64
--- /dev/null
+++ b/tests/sentinel/tests/04-slave-selection.tcl
@@ -0,0 +1,5 @@
+# Test slave selection algorithm.
+#
+# This unit should test:
+# 1) That when there are no suitable slaves no failover is performed.
+# 2) That among the available slaves, the one with the best replication offset is picked.
diff --git a/tests/sentinel/tests/05-manual.tcl b/tests/sentinel/tests/05-manual.tcl
new file mode 100644
index 0000000..95e8d41
--- /dev/null
+++ b/tests/sentinel/tests/05-manual.tcl
@@ -0,0 +1,94 @@
+# Test manual failover
+
+source "../tests/includes/init-tests.tcl"
+
+foreach_sentinel_id id {
+ S $id sentinel debug info-period 2000
+ S $id sentinel debug default-down-after 6000
+ S $id sentinel debug publish-period 1000
+}
+
+test "Manual failover works" {
+ set old_port [RPort $master_id]
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+
+ # Since we just reduced the info-period (default 10000) above, the
+ # sentinel and replicas may not have had time to exchange INFO and pick
+ # up the new period yet, so the failover attempt may fail with NOGOODSLAVE.
+ wait_for_condition 300 50 {
+ [catch {S 0 SENTINEL FAILOVER mymaster}] == 0
+ } else {
+ catch {S 0 SENTINEL FAILOVER mymaster} reply
+ puts [S 0 SENTINEL REPLICAS mymaster]
+ fail "Sentinel manual failover did not work, got: $reply"
+ }
+
+ catch {S 0 SENTINEL FAILOVER mymaster} reply
+ assert_match {*INPROG*} $reply ;# Failover already in progress
+
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "At least one Sentinel did not receive failover info"
+ }
+ }
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $addr 1]]
+}
+
+test "New master [join $addr {:}] role matches" {
+ assert {[RI $master_id role] eq {master}}
+}
+
+test "All the other slaves now point to the new master" {
+ foreach_redis_id id {
+ if {$id != $master_id && $id != 0} {
+ wait_for_condition 1000 50 {
+ [RI $id master_port] == [lindex $addr 1]
+ } else {
+ fail "Redis ID $id not configured to replicate with new master"
+ }
+ }
+ }
+}
+
+test "The old master eventually gets reconfigured as a slave" {
+ wait_for_condition 1000 50 {
+ [RI 0 master_port] == [lindex $addr 1]
+ } else {
+ fail "Old master not reconfigured as slave of new master"
+ }
+}
+
+foreach flag {crash-after-election crash-after-promotion} {
+ # Before each SIMULATE-FAILURE test, re-source init-tests to get a clean environment
+ source "../tests/includes/init-tests.tcl"
+
+ test "SENTINEL SIMULATE-FAILURE $flag works" {
+ assert_equal {OK} [S 0 SENTINEL SIMULATE-FAILURE $flag]
+
+ # Trigger a failover; the failover will trigger leader election and replica promotion.
+ # The sentinel may enter the failover and crash before replying to the command, so catch that case and allow it.
+ wait_for_condition 300 50 {
+ [catch {S 0 SENTINEL FAILOVER mymaster}] == 0
+ ||
+ ([catch {S 0 SENTINEL FAILOVER mymaster} reply] == 1 &&
+ [string match {*couldn't open socket: connection refused*} $reply])
+ } else {
+ catch {S 0 SENTINEL FAILOVER mymaster} reply
+ fail "Sentinel manual failover did not work, got: $reply"
+ }
+
+ # Wait for sentinel to exit (due to simulate-failure flags)
+ wait_for_condition 1000 50 {
+ [catch {S 0 PING}] == 1
+ } else {
+ fail "Sentinel set $flag but did not exit"
+ }
+ assert_error {*couldn't open socket: connection refused*} {S 0 PING}
+
+ restart_instance sentinel 0
+ }
+}
diff --git a/tests/sentinel/tests/06-ckquorum.tcl b/tests/sentinel/tests/06-ckquorum.tcl
new file mode 100644
index 0000000..36c3dc6
--- /dev/null
+++ b/tests/sentinel/tests/06-ckquorum.tcl
@@ -0,0 +1,42 @@
+# Test for the SENTINEL CKQUORUM command
+
+source "../tests/includes/init-tests.tcl"
+set num_sentinels [llength $::sentinel_instances]
+
+test "CKQUORUM reports OK and the right amount of Sentinels" {
+ foreach_sentinel_id id {
+ assert_match "*OK $num_sentinels usable*" [S $id SENTINEL CKQUORUM mymaster]
+ }
+}
+
+test "CKQUORUM detects quorum cannot be reached" {
+ set orig_quorum [expr {$num_sentinels/2+1}]
+ S 0 SENTINEL SET mymaster quorum [expr {$num_sentinels+1}]
+ catch {[S 0 SENTINEL CKQUORUM mymaster]} err
+ assert_match "*NOQUORUM*" $err
+ S 0 SENTINEL SET mymaster quorum $orig_quorum
+}
+
+test "CKQUORUM detects failover authorization cannot be reached" {
+ set orig_quorum [expr {$num_sentinels/2+1}]
+ S 0 SENTINEL SET mymaster quorum 1
+ for {set i 0} {$i < $orig_quorum} {incr i} {
+ kill_instance sentinel [expr {$i + 1}]
+ }
+
+ # We need to make sure that other sentinels are in `DOWN` state
+ # from the point of view of S 0 before executing `CKQUORUM`.
+ wait_for_condition 300 50 {
+ [catch {S 0 SENTINEL CKQUORUM mymaster}] == 1
+ } else {
+ fail "At least $orig_quorum sentinels did not enter the down state."
+ }
+
+ assert_error "*NOQUORUM*" {S 0 SENTINEL CKQUORUM mymaster}
+
+ S 0 SENTINEL SET mymaster quorum $orig_quorum
+ for {set i 0} {$i < $orig_quorum} {incr i} {
+ restart_instance sentinel [expr {$i + 1}]
+ }
+}
+
diff --git a/tests/sentinel/tests/07-down-conditions.tcl b/tests/sentinel/tests/07-down-conditions.tcl
new file mode 100644
index 0000000..dabbc14
--- /dev/null
+++ b/tests/sentinel/tests/07-down-conditions.tcl
@@ -0,0 +1,104 @@
+# Test conditions where an instance is considered to be down
+
+source "../tests/includes/init-tests.tcl"
+source "../../../tests/support/cli.tcl"
+
+foreach_sentinel_id id {
+ S $id sentinel debug info-period 1000
+ S $id sentinel debug ask-period 100
+ S $id sentinel debug default-down-after 3000
+ S $id sentinel debug publish-period 200
+ S $id sentinel debug ping-period 100
+}
+
+set ::alive_sentinel [expr {$::instances_count/2+2}]
+proc ensure_master_up {} {
+ S $::alive_sentinel sentinel debug info-period 1000
+ S $::alive_sentinel sentinel debug ping-period 100
+ S $::alive_sentinel sentinel debug ask-period 100
+ S $::alive_sentinel sentinel debug publish-period 100
+ wait_for_condition 1000 50 {
+ [dict get [S $::alive_sentinel sentinel master mymaster] flags] eq "master"
+ } else {
+ fail "Master flags are not just 'master'"
+ }
+}
+
+proc ensure_master_down {} {
+ S $::alive_sentinel sentinel debug info-period 1000
+ S $::alive_sentinel sentinel debug ping-period 100
+ S $::alive_sentinel sentinel debug ask-period 100
+ S $::alive_sentinel sentinel debug publish-period 100
+ wait_for_condition 1000 50 {
+ [string match *down* \
+ [dict get [S $::alive_sentinel sentinel master mymaster] flags]]
+ } else {
+ fail "Master is not flagged SDOWN"
+ }
+}
+
+test "Crash the majority of Sentinels to prevent failovers for this unit" {
+ for {set id 0} {$id < $quorum} {incr id} {
+ kill_instance sentinel $id
+ }
+}
+
+test "SDOWN is triggered by non-responding but not crashed instance" {
+ ensure_master_up
+ set master_addr [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $master_addr 1]]
+
+ set pid [get_instance_attrib redis $master_id pid]
+ pause_process $pid
+ ensure_master_down
+ resume_process $pid
+ ensure_master_up
+}
+
+test "SDOWN is triggered by crashed instance" {
+ lassign [S $::alive_sentinel SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] host port
+ ensure_master_up
+ kill_instance redis 0
+ ensure_master_down
+ restart_instance redis 0
+ ensure_master_up
+}
+
+test "SDOWN is triggered by masters advertising as slaves" {
+ ensure_master_up
+ R 0 slaveof 127.0.0.1 34567
+ ensure_master_down
+ R 0 slaveof no one
+ ensure_master_up
+}
+
+if {!$::log_req_res} { # this test changes 'dir' config to '/' and logreqres.c cannot open protocol dump file under the root directory.
+test "SDOWN is triggered by misconfigured instance replying with errors" {
+ ensure_master_up
+ set orig_dir [lindex [R 0 config get dir] 1]
+ set orig_save [lindex [R 0 config get save] 1]
+ # Set dir to / and filename to "tmp" to make sure it will fail.
+ R 0 config set dir /
+ R 0 config set dbfilename tmp
+ R 0 config set save "1000000 1000000"
+ after 5000
+ R 0 bgsave
+ after 5000
+ ensure_master_down
+ R 0 config set save $orig_save
+ R 0 config set dir $orig_dir
+ R 0 config set dbfilename dump.rdb
+ R 0 bgsave
+ ensure_master_up
+}
+}
+
+# We reuse this test setup to also exercise command renaming: renaming
+# PING to PONG makes the master appear down as a side effect.
+test "SDOWN is triggered if we rename PING to PONG" {
+ ensure_master_up
+ S $::alive_sentinel SENTINEL SET mymaster rename-command PING PONG
+ ensure_master_down
+ S $::alive_sentinel SENTINEL SET mymaster rename-command PING PING
+ ensure_master_up
+}
diff --git a/tests/sentinel/tests/08-hostname-conf.tcl b/tests/sentinel/tests/08-hostname-conf.tcl
new file mode 100644
index 0000000..263b06f
--- /dev/null
+++ b/tests/sentinel/tests/08-hostname-conf.tcl
@@ -0,0 +1,69 @@
+source "../tests/includes/utils.tcl"
+
+proc set_redis_announce_ip {addr} {
+ foreach_redis_id id {
+ R $id config set replica-announce-ip $addr
+ }
+}
+
+proc set_sentinel_config {keyword value} {
+ foreach_sentinel_id id {
+ S $id sentinel config set $keyword $value
+ }
+}
+
+proc set_all_instances_hostname {hostname} {
+ foreach_sentinel_id id {
+ set_instance_attrib sentinel $id host $hostname
+ }
+ foreach_redis_id id {
+ set_instance_attrib redis $id host $hostname
+ }
+}
+
+test "(pre-init) Configure instances and sentinel for hostname use" {
+ set ::host "localhost"
+ restart_killed_instances
+ set_all_instances_hostname $::host
+ set_redis_announce_ip $::host
+ set_sentinel_config resolve-hostnames yes
+ set_sentinel_config announce-hostnames yes
+}
+
+source "../tests/includes/init-tests.tcl"
+
+proc verify_hostname_announced {hostname} {
+ foreach_sentinel_id id {
+ # Master is reported with its hostname
+ if {![string equal [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 0] $hostname]} {
+ return 0
+ }
+
+ # Replicas are reported with their hostnames
+ foreach replica [S $id SENTINEL REPLICAS mymaster] {
+ if {![string equal [dict get $replica ip] $hostname]} {
+ return 0
+ }
+ }
+ }
+ return 1
+}
+
+test "Sentinel announces hostnames" {
+ # Check initial state
+ verify_hostname_announced $::host
+
+ # Disable announce-hostnames and confirm IPs are used
+ set_sentinel_config announce-hostnames no
+ assert {[verify_hostname_announced "127.0.0.1"] || [verify_hostname_announced "::1"]}
+}
+
+# We need to revert any special configuration because all tests currently
+# share the same instances.
+test "(post-cleanup) Configure instances and sentinel for IPs" {
+ set ::host "127.0.0.1"
+ set_all_instances_hostname $::host
+ set_redis_announce_ip $::host
+ set_sentinel_config resolve-hostnames no
+ set_sentinel_config announce-hostnames no
+} \ No newline at end of file
diff --git a/tests/sentinel/tests/09-acl-support.tcl b/tests/sentinel/tests/09-acl-support.tcl
new file mode 100644
index 0000000..a754dac
--- /dev/null
+++ b/tests/sentinel/tests/09-acl-support.tcl
@@ -0,0 +1,56 @@
+
+source "../tests/includes/init-tests.tcl"
+
+set ::user "testuser"
+set ::password "secret"
+
+proc setup_acl {} {
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id ACL SETUSER $::user >$::password +@all on]
+ assert_equal {OK} [S $id ACL SETUSER default off]
+
+ S $id CLIENT KILL USER default SKIPME no
+ assert_equal {OK} [S $id AUTH $::user $::password]
+ }
+}
+
+proc teardown_acl {} {
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id ACL SETUSER default on]
+ assert_equal {1} [S $id ACL DELUSER $::user]
+
+ S $id SENTINEL CONFIG SET sentinel-user ""
+ S $id SENTINEL CONFIG SET sentinel-pass ""
+ }
+}
+
+test "(post-init) Set up ACL configuration" {
+ setup_acl
+ assert_equal $::user [S 1 ACL WHOAMI]
+}
+
+test "SENTINEL CONFIG SET handles on-the-fly credentials reconfiguration" {
+ # Make sure we're starting with a broken state...
+ wait_for_condition 200 50 {
+ [catch {S 1 SENTINEL CKQUORUM mymaster}] == 1
+ } else {
+ fail "Expected: Sentinel to be disconnected from master due to wrong password"
+ }
+ assert_error "*NOQUORUM*" {S 1 SENTINEL CKQUORUM mymaster}
+
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id SENTINEL CONFIG SET sentinel-user $::user]
+ assert_equal {OK} [S $id SENTINEL CONFIG SET sentinel-pass $::password]
+ }
+
+ wait_for_condition 200 50 {
+ [catch {S 1 SENTINEL CKQUORUM mymaster}] == 0
+ } else {
+ fail "Expected: Sentinel to be connected to master after setting password"
+ }
+ assert_match {*OK*} [S 1 SENTINEL CKQUORUM mymaster]
+}
+
+test "(post-cleanup) Tear down ACL configuration" {
+ teardown_acl
+}
diff --git a/tests/sentinel/tests/10-replica-priority.tcl b/tests/sentinel/tests/10-replica-priority.tcl
new file mode 100644
index 0000000..d3f868a
--- /dev/null
+++ b/tests/sentinel/tests/10-replica-priority.tcl
@@ -0,0 +1,76 @@
+source "../tests/includes/init-tests.tcl"
+
+test "Check acceptable replica-priority values" {
+ foreach_redis_id id {
+ if {$id == $master_id} continue
+
+ # ensure replica-announced accepts yes and no
+ catch {R $id CONFIG SET replica-announced no} e
+ if {$e ne "OK"} {
+ fail "Unable to set replica-announced to no"
+ }
+ catch {R $id CONFIG SET replica-announced yes} e
+ if {$e ne "OK"} {
+ fail "Unable to set replica-announced to yes"
+ }
+
+ # ensure any other value throws an error
+ catch {R $id CONFIG SET replica-announced 321} e
+ if {$e eq "OK"} {
+ fail "Was able to set replica-announced to a value other than yes or no (321), which should not be possible"
+ }
+ catch {R $id CONFIG SET replica-announced a3b2c1} e
+ if {$e eq "OK"} {
+ fail "Was able to set replica-announced to a value other than yes or no (a3b2c1), which should not be possible"
+ }
+
+ # test only the first redis replica; no need to repeat the check
+ break
+ }
+}
+
+proc 10_test_number_of_replicas {n_replicas_expected} {
+ test "Check sentinel replies with $n_replicas_expected replicas" {
+ # ensure sentinels reply with the right number of replicas
+ foreach_sentinel_id id {
+ S $id sentinel debug info-period 100
+ S $id sentinel debug default-down-after 1000
+ S $id sentinel debug publish-period 100
+ set len [llength [S $id SENTINEL REPLICAS mymaster]]
+ wait_for_condition 200 100 {
+ [llength [S $id SENTINEL REPLICAS mymaster]] == $n_replicas_expected
+ } else {
+ fail "Sentinel replies with a wrong number of replicas with replica-announced=yes (expected $n_replicas_expected but got $len) on sentinel $id"
+ }
+ }
+ }
+}
+
+proc 10_set_replica_announced {master_id announced n_replicas} {
+ test "Set replica-announced=$announced on $n_replicas replicas" {
+ set i 0
+ foreach_redis_id id {
+ if {$id == $master_id} continue
+ #puts "set replica-announce=$announced on redis #$id"
+ R $id CONFIG SET replica-announced "$announced"
+ incr i
+ if { $n_replicas!="all" && $i >= $n_replicas } { break }
+ }
+ }
+}
+
+# ensure all replicas are announced
+10_set_replica_announced $master_id "yes" "all"
+# ensure all replicas are announced by sentinels
+10_test_number_of_replicas 4
+
+# ensure the first 2 replicas are not announced
+10_set_replica_announced $master_id "no" 2
+# ensure sentinels are not announcing the first 2 replicas that have been set unannounced
+10_test_number_of_replicas 2
+
+# ensure all replicas are announced
+10_set_replica_announced $master_id "yes" "all"
+ # ensure all replicas are announced by sentinels again
+10_test_number_of_replicas 4
+
diff --git a/tests/sentinel/tests/11-port-0.tcl b/tests/sentinel/tests/11-port-0.tcl
new file mode 100644
index 0000000..a3e8bdb
--- /dev/null
+++ b/tests/sentinel/tests/11-port-0.tcl
@@ -0,0 +1,33 @@
+source "../tests/includes/init-tests.tcl"
+
+test "Start/Stop sentinel on same port with a different runID should not change the total number of sentinels" {
+ set sentinel_id [expr $::instances_count-1]
+ # Kill sentinel instance
+ kill_instance sentinel $sentinel_id
+
+ # Delete the line with myid from the sentinel's config file
+ set orgfilename [file join "sentinel_$sentinel_id" "sentinel.conf"]
+ set tmpfilename "sentinel.conf_tmp"
+ set dirname "sentinel_$sentinel_id"
+
+ delete_lines_with_pattern $orgfilename $tmpfilename "myid"
+
+ # Get count of total sentinels
+ set a [S 0 SENTINEL master mymaster]
+ set original_count [lindex $a 33]
+
+ # Restart sentinel with the modified config file
+ set pid [exec_instance "sentinel" $dirname $orgfilename]
+ lappend ::pids $pid
+
+ after 1000
+
+ # Get the new count of total sentinels
+ set b [S 0 SENTINEL master mymaster]
+ set curr_count [lindex $b 33]
+
+ # If the count is not the same then fail the test
+ if {$original_count != $curr_count} {
+ fail "Sentinel count is incorrect, original count being $original_count and current count is $curr_count"
+ }
+}
diff --git a/tests/sentinel/tests/12-master-reboot.tcl b/tests/sentinel/tests/12-master-reboot.tcl
new file mode 100644
index 0000000..1fdd91d
--- /dev/null
+++ b/tests/sentinel/tests/12-master-reboot.tcl
@@ -0,0 +1,103 @@
+# Check the basic monitoring and failover capabilities.
+source "../tests/includes/init-tests.tcl"
+
+
+if {$::simulate_error} {
+ test "This test will fail" {
+ fail "Simulated error"
+ }
+}
+
+
+# Reboot a previously killed instance within a very short time, without checking whether it has finished loading
+proc reboot_instance {type id} {
+ set dirname "${type}_${id}"
+ set cfgfile [file join $dirname $type.conf]
+ set port [get_instance_attrib $type $id port]
+
+ # Execute the instance with its old setup and record the new pid
+ # for cleanup.
+ set pid [exec_instance $type $dirname $cfgfile]
+ set_instance_attrib $type $id pid $pid
+ lappend ::pids $pid
+
+ # Check that the instance is running
+ if {[server_is_up 127.0.0.1 $port 100] == 0} {
+ set logfile [file join $dirname log.txt]
+ puts [exec tail $logfile]
+ abort_sentinel_test "Problems starting $type #$id: ping timeout, maybe server start failed, check $logfile"
+ }
+
+ # Connect to it with a fresh link
+ set link [redis 127.0.0.1 $port 0 $::tls]
+ $link reconnect 1
+ set_instance_attrib $type $id link $link
+}
+
+
+test "Master reboot in very short time" {
+ set old_port [RPort $master_id]
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ assert {[lindex $addr 1] == $old_port}
+
+ R $master_id debug populate 10000
+ R $master_id bgsave
+ R $master_id config set key-load-delay 1500
+ R $master_id config set loading-process-events-interval-bytes 1024
+ R $master_id config rewrite
+
+ foreach_sentinel_id id {
+ S $id SENTINEL SET mymaster master-reboot-down-after-period 5000
+ S $id sentinel debug ping-period 500
+ S $id sentinel debug ask-period 500
+ }
+
+ kill_instance redis $master_id
+ reboot_instance redis $master_id
+
+ foreach_sentinel_id id {
+ wait_for_condition 1000 100 {
+ [lindex [S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster] 1] != $old_port
+ } else {
+ fail "At least one Sentinel did not receive failover info"
+ }
+ }
+
+ set addr [S 0 SENTINEL GET-MASTER-ADDR-BY-NAME mymaster]
+ set master_id [get_instance_id_by_port redis [lindex $addr 1]]
+
+ # Make sure the instance has loaded the entire dataset. Note we ping
+ # through R, since $link is local to reboot_instance.
+ while 1 {
+ catch {R $master_id ping} retval
+ if {[string match {*LOADING*} $retval]} {
+ after 100
+ continue
+ } else {
+ break
+ }
+ }
+}
+
+test "New master [join $addr {:}] role matches" {
+ assert {[RI $master_id role] eq {master}}
+}
+
+test "All the other slaves now point to the new master" {
+ foreach_redis_id id {
+ if {$id != $master_id && $id != 0} {
+ wait_for_condition 1000 50 {
+ [RI $id master_port] == [lindex $addr 1]
+ } else {
+ fail "Redis ID $id not configured to replicate with new master"
+ }
+ }
+ }
+}
+
+test "The old master eventually gets reconfigured as a slave" {
+ wait_for_condition 1000 50 {
+ [RI 0 master_port] == [lindex $addr 1]
+ } else {
+ fail "Old master not reconfigured as slave of new master"
+ }
+} \ No newline at end of file
diff --git a/tests/sentinel/tests/13-info-command.tcl b/tests/sentinel/tests/13-info-command.tcl
new file mode 100644
index 0000000..ef9dc01
--- /dev/null
+++ b/tests/sentinel/tests/13-info-command.tcl
@@ -0,0 +1,47 @@
+source "../tests/includes/init-tests.tcl"
+
+test "info command with at most one argument" {
+ set subCommandList {}
+ foreach arg {"" "all" "default" "everything"} {
+ if {$arg == ""} {
+ set info [S 0 info]
+ } else {
+ set info [S 0 info $arg]
+ }
+ assert { [string match "*redis_version*" $info] }
+ assert { [string match "*maxclients*" $info] }
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { [string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+ assert { ![string match "*rdb_last_bgsave*" $info] }
+ assert { ![string match "*master_repl_offset*" $info] }
+ assert { ![string match "*cluster_enabled*" $info] }
+ }
+}
+
+test "info command with one sub-section" {
+ set info [S 0 info cpu]
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*redis_version*" $info] }
+
+ set info [S 0 info sentinel]
+ assert { [string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*used_cpu_user*" $info] }
+ assert { ![string match "*redis_version*" $info] }
+}
+
+test "info command with multiple sub-sections" {
+ set info [S 0 info server sentinel replication]
+ assert { [string match "*redis_version*" $info] }
+ assert { [string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+ assert { ![string match "*used_cpu_user*" $info] }
+
+ set info [S 0 info cpu all]
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { [string match "*sentinel_tilt*" $info] }
+ assert { [string match "*redis_version*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+ assert { ![string match "*master_repl_offset*" $info] }
+}
diff --git a/tests/sentinel/tests/14-debug-command.tcl b/tests/sentinel/tests/14-debug-command.tcl
new file mode 100644
index 0000000..dccb992
--- /dev/null
+++ b/tests/sentinel/tests/14-debug-command.tcl
@@ -0,0 +1,9 @@
+source "../tests/includes/init-tests.tcl"
+
+test "Sentinel debug test with arguments and without argument" {
+ set current_info_period [lindex [S 0 SENTINEL DEBUG] 1]
+ S 0 SENTINEL DEBUG info-period 8888
+ assert_equal {8888} [lindex [S 0 SENTINEL DEBUG] 1]
+ S 0 SENTINEL DEBUG info-period $current_info_period
+ assert_equal $current_info_period [lindex [S 0 SENTINEL DEBUG] 1]
+}
diff --git a/tests/sentinel/tests/15-config-set-config-get.tcl b/tests/sentinel/tests/15-config-set-config-get.tcl
new file mode 100644
index 0000000..f9831f8
--- /dev/null
+++ b/tests/sentinel/tests/15-config-set-config-get.tcl
@@ -0,0 +1,58 @@
+source "../tests/includes/init-tests.tcl"
+
+test "SENTINEL CONFIG SET and SENTINEL CONFIG GET handles multiple variables" {
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id SENTINEL CONFIG SET resolve-hostnames yes announce-port 1234]
+ }
+ assert_match {*yes*1234*} [S 1 SENTINEL CONFIG GET resolve-hostnames announce-port]
+ assert_match {announce-port 1234} [S 1 SENTINEL CONFIG GET announce-port]
+}
+
+test "SENTINEL CONFIG GET for duplicate and unknown variables" {
+ assert_equal {OK} [S 1 SENTINEL CONFIG SET resolve-hostnames yes announce-port 1234]
+ assert_match {resolve-hostnames yes} [S 1 SENTINEL CONFIG GET resolve-hostnames resolve-hostnames does-not-exist]
+}
+
+test "SENTINEL CONFIG GET for patterns" {
+ assert_equal {OK} [S 1 SENTINEL CONFIG SET loglevel notice announce-port 1234 announce-hostnames yes]
+ assert_match {loglevel notice} [S 1 SENTINEL CONFIG GET log* *level loglevel]
+ assert_match {announce-hostnames yes announce-ip*announce-port 1234} [S 1 SENTINEL CONFIG GET announce*]
+}
+
+test "SENTINEL CONFIG SET duplicate variables" {
+ catch {[S 1 SENTINEL CONFIG SET resolve-hostnames yes announce-port 1234 announce-port 100]} e
+ if {![string match "*Duplicate argument*" $e]} {
+ fail "Should give wrong arity error"
+ }
+}
+
+test "SENTINEL CONFIG SET, one option does not exist" {
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id SENTINEL CONFIG SET announce-port 111]
+ catch {[S $id SENTINEL CONFIG SET does-not-exist yes announce-port 1234]} e
+ if {![string match "*Invalid argument*" $e]} {
+ fail "Should give Invalid argument error"
+ }
+ }
+ # announce-port must not have been set to 1234, since the call included an unknown option
+ assert_match {*111*} [S 1 SENTINEL CONFIG GET announce-port]
+}
+
+test "SENTINEL CONFIG SET, one option with wrong value" {
+ foreach_sentinel_id id {
+ assert_equal {OK} [S $id SENTINEL CONFIG SET resolve-hostnames no]
+ catch {[S $id SENTINEL CONFIG SET announce-port -1234 resolve-hostnames yes]} e
+ if {![string match "*Invalid value*" $e]} {
+ fail "Expected to return Invalid value error"
+ }
+ }
+ # resolve-hostnames must not have been set to yes, since it came after an option with an invalid value
+ assert_match {*no*} [S 1 SENTINEL CONFIG GET resolve-hostnames]
+}
+
+test "SENTINEL CONFIG SET, wrong number of arguments" {
+ catch {[S 1 SENTINEL CONFIG SET resolve-hostnames yes announce-port 1234 announce-ip]} e
+ if {![string match "*Missing argument*" $e]} {
+ fail "Expected to return Missing argument error"
+ }
+}
diff --git a/tests/sentinel/tests/helpers/check_leaked_fds.tcl b/tests/sentinel/tests/helpers/check_leaked_fds.tcl
new file mode 100755
index 0000000..482b3e0
--- /dev/null
+++ b/tests/sentinel/tests/helpers/check_leaked_fds.tcl
@@ -0,0 +1,79 @@
+#!/usr/bin/env tclsh
+#
+# This script detects file descriptors that have leaked from a parent process.
+#
+# Our goal is to detect file descriptors that were opened by the parent and
+# not cleaned up prior to exec(), but not file descriptors that were inherited
+# from the grandparent which the parent knows nothing about. To do that, we
+# look up every potential leak and try to match it against open files by the
+# grandparent process.
+
+# Get PID of parent process
+proc get_parent_pid {_pid} {
+ set fd [open "/proc/$_pid/status" "r"]
+ set content [read $fd]
+ close $fd
+
+ if {[regexp {\nPPid:\s+(\d+)} $content _ ppid]} {
+ return $ppid
+ }
+
+ error "failed to get parent pid"
+}
+
+# Read symlink to get info about the specified fd of the specified process.
+# The result can be the file name or an arbitrary string that identifies it.
+# When not able to read, an empty string is returned.
+proc get_fdlink {_pid fd} {
+ if { [catch {set fdlink [file readlink "/proc/$_pid/fd/$fd"]} err] } {
+ return ""
+ }
+ return $fdlink
+}
+
+# Linux only
+set os [exec uname]
+if {$os != "Linux"} {
+ puts "Only Linux is supported."
+ exit 0
+}
+
+if {![info exists env(LEAKED_FDS_FILE)]} {
+ puts "Missing LEAKED_FDS_FILE environment variable."
+ exit 0
+}
+
+set outfile $::env(LEAKED_FDS_FILE)
+set parent_pid [get_parent_pid [pid]]
+set grandparent_pid [get_parent_pid $parent_pid]
+set leaked_fds {}
+
+# Look for fds that were directly inherited from our parent but not from
+# our grandparent (tcl)
+foreach fd [glob -tails -directory "/proc/self/fd" *] {
+ # Ignore stdin/stdout/stderr
+ if {$fd == 0 || $fd == 1 || $fd == 2} {
+ continue
+ }
+
+ set fdlink [get_fdlink "self" $fd]
+ if {$fdlink == ""} {
+ continue
+ }
+
+ # We ignore fds that existed in the grandparent, or fds that don't exist
+ # in our parent (Sentinel process).
+ if {[get_fdlink $grandparent_pid $fd] == $fdlink ||
+ [get_fdlink $parent_pid $fd] != $fdlink} {
+ continue
+ }
+
+ lappend leaked_fds [list $fd $fdlink]
+}
+
+# Produce report only if we found leaks
+if {[llength $leaked_fds] > 0} {
+ set fd [open $outfile "w"]
+ puts $fd [join $leaked_fds "\n"]
+ close $fd
+}
diff --git a/tests/sentinel/tests/includes/init-tests.tcl b/tests/sentinel/tests/includes/init-tests.tcl
new file mode 100644
index 0000000..ddb1319
--- /dev/null
+++ b/tests/sentinel/tests/includes/init-tests.tcl
@@ -0,0 +1,63 @@
+# Initialization tests -- most units start by sourcing this file.
+source "../tests/includes/utils.tcl"
+
+test "(init) Restart killed instances" {
+ restart_killed_instances
+}
+
+test "(init) Remove old master entry from sentinels" {
+ foreach_sentinel_id id {
+ catch {S $id SENTINEL REMOVE mymaster}
+ }
+}
+
+set redis_slaves [expr $::instances_count - 1]
+test "(init) Create a master-slaves cluster of [expr $redis_slaves+1] instances" {
+ create_redis_master_slave_cluster [expr {$redis_slaves+1}]
+}
+set master_id 0
+
+test "(init) Sentinels can start monitoring a master" {
+ set sentinels [llength $::sentinel_instances]
+ set quorum [expr {$sentinels/2+1}]
+ foreach_sentinel_id id {
+ S $id SENTINEL MONITOR mymaster \
+ [get_instance_attrib redis $master_id host] \
+ [get_instance_attrib redis $master_id port] $quorum
+ }
+ foreach_sentinel_id id {
+ assert {[S $id sentinel master mymaster] ne {}}
+ S $id SENTINEL SET mymaster down-after-milliseconds 2000
+ S $id SENTINEL SET mymaster failover-timeout 10000
+ S $id SENTINEL debug tilt-period 5000
+ S $id SENTINEL SET mymaster parallel-syncs 10
+ if {$::leaked_fds_file != "" && [exec uname] == "Linux"} {
+ S $id SENTINEL SET mymaster notification-script ../../tests/helpers/check_leaked_fds.tcl
+ S $id SENTINEL SET mymaster client-reconfig-script ../../tests/helpers/check_leaked_fds.tcl
+ }
+ }
+}
+
+test "(init) Sentinels can talk with the master" {
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [catch {S $id SENTINEL GET-MASTER-ADDR-BY-NAME mymaster}] == 0
+ } else {
+ fail "Sentinel $id can't talk with the master."
+ }
+ }
+}
+
+test "(init) Sentinels are able to auto-discover other sentinels" {
+ verify_sentinel_auto_discovery
+}
+
+test "(init) Sentinels are able to auto-discover slaves" {
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [dict get [S $id SENTINEL MASTER mymaster] num-slaves] == $redis_slaves
+ } else {
+ fail "At least some sentinel can't detect some slave"
+ }
+ }
+}
diff --git a/tests/sentinel/tests/includes/sentinel.conf b/tests/sentinel/tests/includes/sentinel.conf
new file mode 100644
index 0000000..1275236
--- /dev/null
+++ b/tests/sentinel/tests/includes/sentinel.conf
@@ -0,0 +1,9 @@
+# assume master is down after being unresponsive for 20s
+sentinel down-after-milliseconds setmaster 20000
+# reconfigure up to two slaves at a time
+sentinel parallel-syncs setmaster 2
+# wait for 4m before assuming failover went wrong
+sentinel failover-timeout setmaster 240000
+# monitoring set
+sentinel monitor setmaster 10.0.0.1 30000 2
+
diff --git a/tests/sentinel/tests/includes/start-init-tests.tcl b/tests/sentinel/tests/includes/start-init-tests.tcl
new file mode 100644
index 0000000..b052350
--- /dev/null
+++ b/tests/sentinel/tests/includes/start-init-tests.tcl
@@ -0,0 +1,18 @@
+test "(start-init) Flush config and compare rewrite config file lines" {
+ foreach_sentinel_id id {
+ assert_match "OK" [S $id SENTINEL FLUSHCONFIG]
+ set file1 ../tests/includes/sentinel.conf
+ set file2 [file join "sentinel_${id}" "sentinel.conf"]
+ set fh1 [open $file1 r]
+ set fh2 [open $file2 r]
+ # Stop at the first empty line or EOF (gets returns the line length,
+ # or -1 at EOF; the bare truthiness test would loop forever at EOF).
+ while {[gets $fh1 line1] > 0} {
+ if {[gets $fh2 line2] > 0} {
+ assert [string equal $line1 $line2]
+ } else {
+ fail "sentinel config file rewrite sequence changed"
+ }
+ }
+ close $fh1
+ close $fh2
+ }
+} \ No newline at end of file
diff --git a/tests/sentinel/tests/includes/utils.tcl b/tests/sentinel/tests/includes/utils.tcl
new file mode 100644
index 0000000..adfd91c
--- /dev/null
+++ b/tests/sentinel/tests/includes/utils.tcl
@@ -0,0 +1,22 @@
+proc restart_killed_instances {} {
+ foreach type {redis sentinel} {
+ foreach_${type}_id id {
+ if {[get_instance_attrib $type $id pid] == -1} {
+ puts -nonewline "$type/$id "
+ flush stdout
+ restart_instance $type $id
+ }
+ }
+ }
+}
+
+proc verify_sentinel_auto_discovery {} {
+ set sentinels [llength $::sentinel_instances]
+ foreach_sentinel_id id {
+ wait_for_condition 1000 50 {
+ [dict get [S $id SENTINEL MASTER mymaster] num-other-sentinels] == ($sentinels-1)
+ } else {
+ fail "At least some sentinel can't detect some other sentinel"
+ }
+ }
+}
diff --git a/tests/sentinel/tmp/.gitignore b/tests/sentinel/tmp/.gitignore
new file mode 100644
index 0000000..f581f73
--- /dev/null
+++ b/tests/sentinel/tmp/.gitignore
@@ -0,0 +1,2 @@
+redis_*
+sentinel_*
diff --git a/tests/support/aofmanifest.tcl b/tests/support/aofmanifest.tcl
new file mode 100644
index 0000000..ffde3e3
--- /dev/null
+++ b/tests/support/aofmanifest.tcl
@@ -0,0 +1,169 @@
+set ::base_aof_sufix ".base"
+set ::incr_aof_sufix ".incr"
+set ::manifest_suffix ".manifest"
+set ::aof_format_suffix ".aof"
+set ::rdb_format_suffix ".rdb"
+
+proc get_full_path {dir filename} {
+ set _ [format "%s/%s" $dir $filename]
+}
+
+proc join_path {dir1 dir2} {
+ return [format "%s/%s" $dir1 $dir2]
+}
+
+proc get_redis_dir {} {
+ set config [srv config]
+ set _ [dict get $config "dir"]
+}
+
+proc check_file_exist {dir filename} {
+ set file_path [get_full_path $dir $filename]
+ return [file exists $file_path]
+}
+
+proc del_file {dir filename} {
+ set file_path [get_full_path $dir $filename]
+ catch {exec rm -rf $file_path}
+}
+
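+# The helpers below parse the AOF manifest; each manifest line has the
+# form "file <name> seq <n> type <b|h|i>", so after splitting on spaces
+# word 1 is the file name and word 5 is the type. For example:
+#
+#   file appendonly.aof.1.base.rdb seq 1 type b
+#   file appendonly.aof.1.incr.aof seq 1 type i
+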
+proc get_cur_base_aof_name {manifest_filepath} {
+ set fp [open $manifest_filepath r+]
+ set lines {}
+ while {1} {
+ set line [gets $fp]
+ if {[eof $fp]} {
+ close $fp
+ break;
+ }
+
+ lappend lines $line
+ }
+
+ if {[llength $lines] == 0} {
+ return ""
+ }
+
+ set first_line [lindex $lines 0]
+ set aofname [lindex [split $first_line " "] 1]
+ set aoftype [lindex [split $first_line " "] 5]
+ if { $aoftype eq "b" } {
+ return $aofname
+ }
+
+ return ""
+}
+
+proc get_last_incr_aof_name {manifest_filepath} {
+ set fp [open $manifest_filepath r+]
+ set lines {}
+ while {1} {
+ set line [gets $fp]
+ if {[eof $fp]} {
+ close $fp
+ break;
+ }
+
+ lappend lines $line
+ }
+
+ if {[llength $lines] == 0} {
+ return ""
+ }
+
+ set len [llength $lines]
+ set last_line [lindex $lines [expr $len - 1]]
+ set aofname [lindex [split $last_line " "] 1]
+ set aoftype [lindex [split $last_line " "] 5]
+ if { $aoftype eq "i" } {
+ return $aofname
+ }
+
+ return ""
+}
+
+proc get_last_incr_aof_path {r} {
+ set dir [lindex [$r config get dir] 1]
+ set appenddirname [lindex [$r config get appenddirname] 1]
+ set appendfilename [lindex [$r config get appendfilename] 1]
+ set manifest_filepath [file join $dir $appenddirname $appendfilename$::manifest_suffix]
+ set last_incr_aof_name [get_last_incr_aof_name $manifest_filepath]
+ if {$last_incr_aof_name == ""} {
+ return ""
+ }
+ return [file join $dir $appenddirname $last_incr_aof_name]
+}
+
+proc get_base_aof_path {r} {
+ set dir [lindex [$r config get dir] 1]
+ set appenddirname [lindex [$r config get appenddirname] 1]
+ set appendfilename [lindex [$r config get appendfilename] 1]
+ set manifest_filepath [file join $dir $appenddirname $appendfilename$::manifest_suffix]
+ set cur_base_aof_name [get_cur_base_aof_name $manifest_filepath]
+ if {$cur_base_aof_name == ""} {
+ return ""
+ }
+ return [file join $dir $appenddirname $cur_base_aof_name]
+}
+
+proc assert_aof_manifest_content {manifest_path content} {
+ set fp [open $manifest_path r+]
+ set lines {}
+ while {1} {
+ set line [gets $fp]
+ if {[eof $fp]} {
+ close $fp
+ break;
+ }
+
+ lappend lines $line
+ }
+
+ assert_equal [llength $lines] [llength $content]
+
+ for { set i 0 } { $i < [llength $lines] } {incr i} {
+ assert_equal [lindex $lines $i] [lindex $content $i]
+ }
+}
+
+proc clean_aof_persistence {aof_dirpath} {
+ catch {eval exec rm -rf [glob $aof_dirpath]}
+}
+
+proc append_to_manifest {str} {
+ upvar fp fp
+ puts -nonewline $fp $str
+}
+
+proc create_aof_manifest {dir aof_manifest_file code} {
+ create_aof_dir $dir
+ upvar fp fp
+ set fp [open $aof_manifest_file w+]
+ uplevel 1 $code
+ close $fp
+}
+
+proc append_to_aof {str} {
+ upvar fp fp
+ puts -nonewline $fp $str
+}
+
+proc create_aof {dir aof_file code} {
+ create_aof_dir $dir
+ upvar fp fp
+ set fp [open $aof_file w+]
+ uplevel 1 $code
+ close $fp
+}
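+
+# A minimal usage sketch (the file name here is hypothetical): write one
+# SET command, as raw RESP, into an incr AOF under the AOF directory.
+#
+#   create_aof $aof_dirpath "appendonly.aof.1.incr.aof" {
+#       append_to_aof "*3\r\n\$3\r\nSET\r\n\$3\r\nfoo\r\n\$3\r\nbar\r\n"
+#   }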
+
+proc create_aof_dir {dir_path} {
+ file mkdir $dir_path
+}
+
+proc start_server_aof {overrides code} {
+ upvar defaults defaults srv srv server_path server_path
+ set config [concat $defaults $overrides]
+ set srv [start_server [list overrides $config]]
+ uplevel 1 $code
+ kill_server $srv
+}
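+
+# Note that start_server_aof upvar-links `defaults` from the caller, so a
+# `defaults` config list must be defined in the calling scope first; a
+# hypothetical sketch:
+#
+#   set defaults {appendonly {yes} appendfilename {appendonly.aof}}
+#   start_server_aof [list dir $server_path] {
+#       # ... run assertions against the running server ...
+#   }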
diff --git a/tests/support/benchmark.tcl b/tests/support/benchmark.tcl
new file mode 100644
index 0000000..156b205
--- /dev/null
+++ b/tests/support/benchmark.tcl
@@ -0,0 +1,33 @@
+proc redisbenchmark_tls_config {testsdir} {
+ set tlsdir [file join $testsdir tls]
+ set cert [file join $tlsdir client.crt]
+ set key [file join $tlsdir client.key]
+ set cacert [file join $tlsdir ca.crt]
+
+ if {$::tls} {
+ return [list --tls --cert $cert --key $key --cacert $cacert]
+ } else {
+ return {}
+ }
+}
+
+proc redisbenchmark {host port {opts {}}} {
+ set cmd [list src/redis-benchmark -h $host -p $port]
+ lappend cmd {*}[redisbenchmark_tls_config "tests"]
+ lappend cmd {*}$opts
+ return $cmd
+}
+
+proc redisbenchmarkuri {host port {opts {}}} {
+ set cmd [list src/redis-benchmark -u redis://$host:$port]
+ lappend cmd {*}[redisbenchmark_tls_config "tests"]
+ lappend cmd {*}$opts
+ return $cmd
+}
+
+proc redisbenchmarkuriuserpass {host port user pass {opts {}}} {
+ set cmd [list src/redis-benchmark -u redis://$user:$pass@$host:$port]
+ lappend cmd {*}[redisbenchmark_tls_config "tests"]
+ lappend cmd {*}$opts
+ return $cmd
+}
diff --git a/tests/support/cli.tcl b/tests/support/cli.tcl
new file mode 100644
index 0000000..a080823
--- /dev/null
+++ b/tests/support/cli.tcl
@@ -0,0 +1,36 @@
+proc rediscli_tls_config {testsdir} {
+ set tlsdir [file join $testsdir tls]
+ set cert [file join $tlsdir client.crt]
+ set key [file join $tlsdir client.key]
+ set cacert [file join $tlsdir ca.crt]
+
+ if {$::tls} {
+ return [list --tls --cert $cert --key $key --cacert $cacert]
+ } else {
+ return {}
+ }
+}
+
+# Returns command line for executing redis-cli
+proc rediscli {host port {opts {}}} {
+ set cmd [list src/redis-cli -h $host -p $port]
+ lappend cmd {*}[rediscli_tls_config "tests"]
+ lappend cmd {*}$opts
+ return $cmd
+}
+
+# Returns command line for executing redis-cli with a unix socket address
+proc rediscli_unixsocket {unixsocket {opts {}}} {
+ return [list src/redis-cli -s $unixsocket {*}$opts]
+}
+
+# Run redis-cli with specified args on the server of specified level.
+# Returns output broken down into individual lines.
+proc rediscli_exec {level args} {
+ set cmd [rediscli_unixsocket [srv $level unixsocket] $args]
+ set fd [open "|$cmd" "r"]
+ set ret [lrange [split [read $fd] "\n"] 0 end-1]
+ close $fd
+
+ return $ret
+}
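+
+# For example, [rediscli_exec 0 ping] runs PING against the current
+# server over its unix socket and returns the output lines, i.e. {PONG}.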
diff --git a/tests/support/cluster.tcl b/tests/support/cluster.tcl
new file mode 100644
index 0000000..081ef6a
--- /dev/null
+++ b/tests/support/cluster.tcl
@@ -0,0 +1,367 @@
+# Tcl redis cluster client, built on top of the redis.tcl client library.
+# Copyright (C) 2014 Salvatore Sanfilippo
+# Released under the BSD license like Redis itself
+#
+# Example usage:
+#
+# set c [redis_cluster {127.0.0.1:6379 127.0.0.1:6380}]
+# $c set foo
+# $c get foo
+# $c close
+
+package require Tcl 8.5
+package provide redis_cluster 0.1
+
+namespace eval redis_cluster {}
+set ::redis_cluster::internal_id 0
+set ::redis_cluster::id 0
+array set ::redis_cluster::startup_nodes {}
+array set ::redis_cluster::nodes {}
+array set ::redis_cluster::slots {}
+array set ::redis_cluster::tls {}
+
+# List of "plain" commands, which are commands where the sole key is always
+# the first argument.
+set ::redis_cluster::plain_commands {
+ get set setnx setex psetex append strlen exists setbit getbit
+ setrange getrange substr incr decr rpush lpush rpushx lpushx
+ linsert rpop lpop brpop llen lindex lset lrange ltrim lrem
+ sadd srem sismember smismember scard spop srandmember smembers sscan zadd
+ zincrby zrem zremrangebyscore zremrangebyrank zremrangebylex zrange
+ zrangebyscore zrevrangebyscore zrangebylex zrevrangebylex zcount
+ zlexcount zrevrange zcard zscore zmscore zrank zrevrank zscan hset hsetnx
+ hget hmset hmget hincrby hincrbyfloat hdel hlen hkeys hvals
+ hgetall hexists hscan incrby decrby incrbyfloat getset move
+ expire expireat pexpire pexpireat type ttl pttl persist restore
+ dump bitcount bitpos pfadd pfcount cluster ssubscribe spublish
+ sunsubscribe
+}
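+
+# For any command in this list the client routes the request by the slot
+# of its first argument; e.g. "$cluster get foo" is sent to the node that
+# serves the slot "foo" hashes to.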
+
+# Create a cluster client. The nodes are given as a list of host:port. The TLS
+# parameter (1 or 0) is optional and defaults to the global $::tls.
+proc redis_cluster {nodes {tls -1}} {
+ set id [incr ::redis_cluster::id]
+ set ::redis_cluster::startup_nodes($id) $nodes
+ set ::redis_cluster::nodes($id) {}
+ set ::redis_cluster::slots($id) {}
+ set ::redis_cluster::tls($id) [expr $tls == -1 ? $::tls : $tls]
+ set handle [interp alias {} ::redis_cluster::instance$id {} ::redis_cluster::__dispatch__ $id]
+ $handle refresh_nodes_map
+ return $handle
+}
+
+# Totally reset the slots / nodes state for the client: calls
+# CLUSTER NODES on the first available startup node, populates
+# ::redis_cluster::nodes($id) with a hash mapping node ip:port to a
+# representation of the node (another hash), and finally populates
+# ::redis_cluster::slots($id) with a hash mapping slot numbers to
+# node addresses.
+#
+# This function is called when a new Redis Cluster client is initialized
+# and every time we get a -MOVED redirection error.
+proc ::redis_cluster::__method__refresh_nodes_map {id} {
+ # Contact the first responding startup node.
+ set idx 0; # Index of the node that will respond.
+ set errmsg {}
+ foreach start_node $::redis_cluster::startup_nodes($id) {
+ set ip_port [lindex [split $start_node @] 0]
+ lassign [split $ip_port :] start_host start_port
+ set tls $::redis_cluster::tls($id)
+ if {[catch {
+ set r {}
+ set r [redis $start_host $start_port 0 $tls]
+ set nodes_descr [$r cluster nodes]
+ $r close
+ } e]} {
+ if {$r ne {}} {catch {$r close}}
+ incr idx
+ if {[string length $errmsg] < 200} {
+ append errmsg " $ip_port: $e"
+ }
+ continue ; # Try next.
+ } else {
+ break; # Good node found.
+ }
+ }
+
+ if {$idx == [llength $::redis_cluster::startup_nodes($id)]} {
+ error "No good startup node found. $errmsg"
+ }
+
+ # Put the node that responded as first in the list if it is not
+ # already the first.
+ if {$idx != 0} {
+ set l $::redis_cluster::startup_nodes($id)
+ set left [lrange $l 0 [expr {$idx-1}]]
+ set right [lrange $l [expr {$idx+1}] end]
+ set l [concat [lindex $l $idx] $left $right]
+ set ::redis_cluster::startup_nodes($id) $l
+ }
+
+ # Parse CLUSTER NODES output to populate the nodes description.
+ set nodes {} ; # addr -> node description hash.
+ foreach line [split $nodes_descr "\n"] {
+ set line [string trim $line]
+ if {$line eq {}} continue
+ set args [split $line " "]
+ lassign $args nodeid addr flags slaveof pingsent pongrecv configepoch linkstate
+ set slots [lrange $args 8 end]
+ set addr [lindex [split $addr @] 0]
+ if {$addr eq {:0}} {
+ set addr $start_host:$start_port
+ }
+ lassign [split $addr :] host port
+
+ # Connect to the node
+ set link {}
+ set tls $::redis_cluster::tls($id)
+ catch {set link [redis $host $port 0 $tls]}
+
+ # Build this node description as a hash.
+ set node [dict create \
+ id $nodeid \
+ internal_id $id \
+ addr $addr \
+ host $host \
+ port $port \
+ flags $flags \
+ slaveof $slaveof \
+ slots $slots \
+ link $link \
+ ]
+ dict set nodes $addr $node
+ lappend ::redis_cluster::startup_nodes($id) $addr
+ }
+
+ # Close all the existing links in the old nodes map, and set the new
+ # map as current.
+ foreach n $::redis_cluster::nodes($id) {
+ catch {
+ [dict get $n link] close
+ }
+ }
+ set ::redis_cluster::nodes($id) $nodes
+
+ # Populates the slots -> nodes map.
+ dict for {addr node} $nodes {
+ foreach slotrange [dict get $node slots] {
+ lassign [split $slotrange -] start end
+ if {$end == {}} {set end $start}
+ for {set j $start} {$j <= $end} {incr j} {
+ dict set ::redis_cluster::slots($id) $j $addr
+ }
+ }
+ }
+
+ # Only retain unique entries in the startup nodes list
+ set ::redis_cluster::startup_nodes($id) [lsort -unique $::redis_cluster::startup_nodes($id)]
+}
+
+# Free a redis_cluster handle.
+proc ::redis_cluster::__method__close {id} {
+ catch {
+ set nodes $::redis_cluster::nodes($id)
+ dict for {addr node} $nodes {
+ catch {
+ [dict get $node link] close
+ }
+ }
+ }
+ catch {unset ::redis_cluster::startup_nodes($id)}
+ catch {unset ::redis_cluster::nodes($id)}
+ catch {unset ::redis_cluster::slots($id)}
+ catch {unset ::redis_cluster::tls($id)}
+ catch {interp alias {} ::redis_cluster::instance$id {}}
+}
+
+proc ::redis_cluster::__method__masternode_for_slot {id slot} {
+ # Get the node mapped to this slot.
+ set node_addr [dict get $::redis_cluster::slots($id) $slot]
+ if {$node_addr eq {}} {
+ error "No mapped node for slot $slot."
+ }
+ return [dict get $::redis_cluster::nodes($id) $node_addr]
+}
+
+proc ::redis_cluster::__method__masternode_notfor_slot {id slot} {
+ # Get a node that is not mapped to this slot.
+ set node_addr [dict get $::redis_cluster::slots($id) $slot]
+ set addrs [dict keys $::redis_cluster::nodes($id)]
+ foreach addr [lshuffle $addrs] {
+ set node [dict get $::redis_cluster::nodes($id) $addr]
+ if {$node_addr ne $addr && [dict get $node slaveof] eq "-"} {
+ return $node
+ }
+ }
+ error "Slot $slot is everywhere"
+}
+
+proc ::redis_cluster::__dispatch__ {id method args} {
+ if {[info command ::redis_cluster::__method__$method] eq {}} {
+ # Get the keys from the command.
+ set keys [::redis_cluster::get_keys_from_command $method $args]
+ if {$keys eq {}} {
+ error "Redis command '$method' is not supported by redis_cluster."
+ }
+
+ # Resolve the hash slot the keys map to.
+ set slot [::redis_cluster::get_slot_from_keys $keys]
+ if {$slot eq {}} {
+ error "Invalid command: multiple keys not hashing to the same slot."
+ }
+
+ # Get the node mapped to this slot.
+ set node_addr [dict get $::redis_cluster::slots($id) $slot]
+ if {$node_addr eq {}} {
+ error "No mapped node for slot $slot."
+ }
+
+ # Execute the command in the node we think is the slot owner.
+ set retry 100
+ set asking 0
+ while {[incr retry -1]} {
+ if {$retry < 5} {after 100}
+ set node [dict get $::redis_cluster::nodes($id) $node_addr]
+ set link [dict get $node link]
+ if {$asking} {
+ $link ASKING
+ set asking 0
+ }
+ if {[catch {$link $method {*}$args} e]} {
+ if {$link eq {} || \
+ [string range $e 0 4] eq {MOVED} || \
+ [string range $e 0 2] eq {I/O} \
+ } {
+ # MOVED redirection.
+ ::redis_cluster::__method__refresh_nodes_map $id
+ set node_addr [dict get $::redis_cluster::slots($id) $slot]
+ continue
+ } elseif {[string range $e 0 2] eq {ASK}} {
+ # ASK redirection.
+ set node_addr [lindex $e 2]
+ set asking 1
+ continue
+ } else {
+ # Non redirecting error.
+ error $e $::errorInfo $::errorCode
+ }
+ } else {
+ # OK query went fine
+ return $e
+ }
+ }
+ error "Too many redirections or failures contacting Redis Cluster."
+ } else {
+ uplevel 1 [list ::redis_cluster::__method__$method $id] $args
+ }
+}
+
+proc ::redis_cluster::get_keys_from_command {cmd argv} {
+ set cmd [string tolower $cmd]
+ # Most Redis commands get just one key as first argument.
+ if {[lsearch -exact $::redis_cluster::plain_commands $cmd] != -1} {
+ return [list [lindex $argv 0]]
+ }
+
+ # Special handling for other commands
+ switch -exact $cmd {
+ mget {return $argv}
+ eval {return [lrange $argv 2 1+[lindex $argv 1]]}
+ evalsha {return [lrange $argv 2 1+[lindex $argv 1]]}
+ spublish {return [list [lindex $argv 1]]}
+ }
+
+ # All the remaining commands are not handled.
+ return {}
+}
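+
+# For example, for the call "$cluster eval {return 1} 2 k1 k2" the argv
+# received here is {{return 1} 2 k1 k2}, so [lrange $argv 2 1+2] extracts
+# the two key names {k1 k2}.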
+
+# Returns the CRC16 of the specified string.
+# The CRC parameters are described in the Redis Cluster specification.
+set ::redis_cluster::XMODEMCRC16Lookup {
+ 0x0000 0x1021 0x2042 0x3063 0x4084 0x50a5 0x60c6 0x70e7
+ 0x8108 0x9129 0xa14a 0xb16b 0xc18c 0xd1ad 0xe1ce 0xf1ef
+ 0x1231 0x0210 0x3273 0x2252 0x52b5 0x4294 0x72f7 0x62d6
+ 0x9339 0x8318 0xb37b 0xa35a 0xd3bd 0xc39c 0xf3ff 0xe3de
+ 0x2462 0x3443 0x0420 0x1401 0x64e6 0x74c7 0x44a4 0x5485
+ 0xa56a 0xb54b 0x8528 0x9509 0xe5ee 0xf5cf 0xc5ac 0xd58d
+ 0x3653 0x2672 0x1611 0x0630 0x76d7 0x66f6 0x5695 0x46b4
+ 0xb75b 0xa77a 0x9719 0x8738 0xf7df 0xe7fe 0xd79d 0xc7bc
+ 0x48c4 0x58e5 0x6886 0x78a7 0x0840 0x1861 0x2802 0x3823
+ 0xc9cc 0xd9ed 0xe98e 0xf9af 0x8948 0x9969 0xa90a 0xb92b
+ 0x5af5 0x4ad4 0x7ab7 0x6a96 0x1a71 0x0a50 0x3a33 0x2a12
+ 0xdbfd 0xcbdc 0xfbbf 0xeb9e 0x9b79 0x8b58 0xbb3b 0xab1a
+ 0x6ca6 0x7c87 0x4ce4 0x5cc5 0x2c22 0x3c03 0x0c60 0x1c41
+ 0xedae 0xfd8f 0xcdec 0xddcd 0xad2a 0xbd0b 0x8d68 0x9d49
+ 0x7e97 0x6eb6 0x5ed5 0x4ef4 0x3e13 0x2e32 0x1e51 0x0e70
+ 0xff9f 0xefbe 0xdfdd 0xcffc 0xbf1b 0xaf3a 0x9f59 0x8f78
+ 0x9188 0x81a9 0xb1ca 0xa1eb 0xd10c 0xc12d 0xf14e 0xe16f
+ 0x1080 0x00a1 0x30c2 0x20e3 0x5004 0x4025 0x7046 0x6067
+ 0x83b9 0x9398 0xa3fb 0xb3da 0xc33d 0xd31c 0xe37f 0xf35e
+ 0x02b1 0x1290 0x22f3 0x32d2 0x4235 0x5214 0x6277 0x7256
+ 0xb5ea 0xa5cb 0x95a8 0x8589 0xf56e 0xe54f 0xd52c 0xc50d
+ 0x34e2 0x24c3 0x14a0 0x0481 0x7466 0x6447 0x5424 0x4405
+ 0xa7db 0xb7fa 0x8799 0x97b8 0xe75f 0xf77e 0xc71d 0xd73c
+ 0x26d3 0x36f2 0x0691 0x16b0 0x6657 0x7676 0x4615 0x5634
+ 0xd94c 0xc96d 0xf90e 0xe92f 0x99c8 0x89e9 0xb98a 0xa9ab
+ 0x5844 0x4865 0x7806 0x6827 0x18c0 0x08e1 0x3882 0x28a3
+ 0xcb7d 0xdb5c 0xeb3f 0xfb1e 0x8bf9 0x9bd8 0xabbb 0xbb9a
+ 0x4a75 0x5a54 0x6a37 0x7a16 0x0af1 0x1ad0 0x2ab3 0x3a92
+ 0xfd2e 0xed0f 0xdd6c 0xcd4d 0xbdaa 0xad8b 0x9de8 0x8dc9
+ 0x7c26 0x6c07 0x5c64 0x4c45 0x3ca2 0x2c83 0x1ce0 0x0cc1
+ 0xef1f 0xff3e 0xcf5d 0xdf7c 0xaf9b 0xbfba 0x8fd9 0x9ff8
+ 0x6e17 0x7e36 0x4e55 0x5e74 0x2e93 0x3eb2 0x0ed1 0x1ef0
+}
+
+proc ::redis_cluster::crc16 {s} {
+ set s [encoding convertto ascii $s]
+ set crc 0
+ foreach char [split $s {}] {
+ scan $char %c byte
+ set crc [expr {(($crc<<8)&0xffff) ^ [lindex $::redis_cluster::XMODEMCRC16Lookup [expr {(($crc>>8)^$byte) & 0xff}]]}]
+ }
+ return $crc
+}
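+
+# A quick sanity check for these XMODEM parameters: the CRC16 of the
+# ASCII string "123456789" is 0x31C3 (12739), the reference value given
+# in the Redis Cluster specification.
+#
+#   ::redis_cluster::crc16 "123456789"   ;# => 12739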
+
+# Hash a single key, returning the slot it belongs to. Implements hash
+# tags as described in the Redis Cluster specification.
+proc ::redis_cluster::hash {key} {
+ set keylen [string length $key]
+ set s {}
+ set e {}
+ for {set s 0} {$s < $keylen} {incr s} {
+ if {[string index $key $s] eq "\{"} break
+ }
+
+ if {$s == $keylen} {
+ set res [expr {[crc16 $key] & 16383}]
+ return $res
+ }
+
+ for {set e [expr {$s+1}]} {$e < $keylen} {incr e} {
+ if {[string index $key $e] == "\}"} break
+ }
+
+ if {$e == $keylen || $e == [expr {$s+1}]} {
+ set res [expr {[crc16 $key] & 16383}]
+ return $res
+ }
+
+ set key_sub [string range $key [expr {$s+1}] [expr {$e-1}]]
+ return [expr {[crc16 $key_sub] & 16383}]
+}
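+
+# Hash tag example: only the substring between the first "{" and the
+# next "}" is hashed, so keys like "{user1000}.following" and
+# "{user1000}.followers" hash to the same slot as "user1000" and can be
+# used together in multi-key commands.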
+
+# Return the slot the specified keys hash to.
+# If the keys hash to multiple slots, an empty string is returned to
+# signal that the command can't be run in Redis Cluster.
+proc ::redis_cluster::get_slot_from_keys {keys} {
+ set slot {}
+ foreach k $keys {
+ set s [::redis_cluster::hash $k]
+ if {$slot eq {}} {
+ set slot $s
+ } elseif {$slot != $s} {
+ return {} ; # Error
+ }
+ }
+ return $slot
+}
diff --git a/tests/support/cluster_util.tcl b/tests/support/cluster_util.tcl
new file mode 100644
index 0000000..2e3611e
--- /dev/null
+++ b/tests/support/cluster_util.tcl
@@ -0,0 +1,201 @@
+# Cluster helper functions
+
+# Check if cluster configuration is consistent.
+proc cluster_config_consistent {} {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ if {$j == 0} {
+ set base_cfg [R $j cluster slots]
+ } else {
+ if {[R $j cluster slots] != $base_cfg} {
+ return 0
+ }
+ }
+ }
+
+ return 1
+}
+
+# Check if cluster size is consistent.
+proc cluster_size_consistent {cluster_size} {
+ for {set j 0} {$j < $cluster_size} {incr j} {
+ if {[CI $j cluster_known_nodes] ne $cluster_size} {
+ return 0
+ }
+ }
+ return 1
+}
+
+# Wait for cluster configuration to propagate and be consistent across nodes.
+proc wait_for_cluster_propagation {} {
+ wait_for_condition 50 100 {
+ [cluster_config_consistent] eq 1
+ } else {
+ fail "cluster config did not reach a consistent state"
+ }
+}
+
+# Wait for cluster size to be consistent across nodes.
+proc wait_for_cluster_size {cluster_size} {
+ wait_for_condition 1000 50 {
+ [cluster_size_consistent $cluster_size] eq 1
+ } else {
+ fail "cluster size did not reach a consistent size $cluster_size"
+ }
+}
+
+# Check that cluster nodes agree about "state", or raise an error.
+proc wait_for_cluster_state {state} {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ wait_for_condition 100 50 {
+ [CI $j cluster_state] eq $state
+ } else {
+ fail "Cluster node $j cluster_state:[CI $j cluster_state]"
+ }
+ }
+}
+
+# Default slot allocation for clusters: each master gets a contiguous
+# block of approximately the same number of slots.
+proc continuous_slot_allocation {masters} {
+ set avg [expr double(16384) / $masters]
+ set slot_start 0
+ for {set j 0} {$j < $masters} {incr j} {
+ set slot_end [expr int(ceil(($j + 1) * $avg) - 1)]
+ R $j cluster addslotsrange $slot_start $slot_end
+ set slot_start [expr $slot_end + 1]
+ }
+}
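+
+# For example, with 3 masters the allocation above yields the ranges
+# 0-5461, 5462-10922 and 10923-16383; the ceil() keeps range sizes
+# within one slot of each other.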
+
+# Setup method to be executed to configure the cluster before the
+# tests run.
+proc cluster_setup {masters node_count slot_allocator code} {
+ # Have all nodes meet
+ if {$::tls} {
+ set tls_cluster [lindex [R 0 CONFIG GET tls-cluster] 1]
+ }
+ if {$::tls && !$tls_cluster} {
+ for {set i 1} {$i < $node_count} {incr i} {
+ R 0 CLUSTER MEET [srv -$i host] [srv -$i pport]
+ }
+ } else {
+ for {set i 1} {$i < $node_count} {incr i} {
+ R 0 CLUSTER MEET [srv -$i host] [srv -$i port]
+ }
+ }
+
+ $slot_allocator $masters
+
+ wait_for_cluster_propagation
+
+ # Setup master/replica relationships
+ for {set i 0} {$i < $masters} {incr i} {
+ set nodeid [R $i CLUSTER MYID]
+ for {set j [expr $i + $masters]} {$j < $node_count} {incr j $masters} {
+ R $j CLUSTER REPLICATE $nodeid
+ }
+ }
+
+ wait_for_cluster_propagation
+ wait_for_cluster_state "ok"
+
+ uplevel 1 $code
+}
+
+# Start a cluster with the given number of masters and replicas. Replicas
+# will be allocated to masters by round robin.
+proc start_cluster {masters replicas options code {slot_allocator continuous_slot_allocation}} {
+ set node_count [expr $masters + $replicas]
+
+ # Set the final code to be the tests + cluster setup
+ set code [list cluster_setup $masters $node_count $slot_allocator $code]
+
+ # Configure the starting of multiple servers. Set cluster node timeout
+ # aggressively since many tests depend on ping/pong messages.
+ set cluster_options [list overrides [list cluster-enabled yes cluster-ping-interval 100 cluster-node-timeout 3000]]
+ set options [concat $cluster_options $options]
+
+ # Cluster mode only supports a single database, so before executing the tests
+ # it needs to be configured correctly and needs to be reset after the tests.
+ set old_singledb $::singledb
+ set ::singledb 1
+ start_multiple_servers $node_count $options $code
+ set ::singledb $old_singledb
+}
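+
+# A usage sketch: start 3 masters and 3 replicas (one replica per master,
+# assigned round robin) and run the body once the cluster state is "ok":
+#
+#   start_cluster 3 3 {tags {"cluster"}} {
+#       R 0 set foo bar
+#   }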
+
+# Test node for flag.
+proc cluster_has_flag {node flag} {
+ expr {[lsearch -exact [dict get $node flags] $flag] != -1}
+}
+
+# Returns the parsed "myself" node entry as a dictionary.
+proc cluster_get_myself id {
+ set nodes [get_cluster_nodes $id]
+ foreach n $nodes {
+ if {[cluster_has_flag $n myself]} {return $n}
+ }
+ return {}
+}
+
+# Returns a parsed CLUSTER NODES output as a list of dictionaries.
+proc get_cluster_nodes id {
+ set lines [split [R $id cluster nodes] "\r\n"]
+ set nodes {}
+ foreach l $lines {
+ set l [string trim $l]
+ if {$l eq {}} continue
+ set args [split $l]
+ set node [dict create \
+ id [lindex $args 0] \
+ addr [lindex $args 1] \
+ flags [split [lindex $args 2] ,] \
+ slaveof [lindex $args 3] \
+ ping_sent [lindex $args 4] \
+ pong_recv [lindex $args 5] \
+ config_epoch [lindex $args 6] \
+ linkstate [lindex $args 7] \
+ slots [lrange $args 8 end] \
+ ]
+ lappend nodes $node
+ }
+ return $nodes
+}
+
+# Returns 1 if no node knows node_id, 0 if any node knows it.
+proc node_is_forgotten {node_id} {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ set cluster_nodes [R $j CLUSTER NODES]
+ if { [string match "*$node_id*" $cluster_nodes] } {
+ return 0
+ }
+ }
+ return 1
+}
+
+# Isolate a node from the cluster and give it a new nodeid
+proc isolate_node {id} {
+ set node_id [R $id CLUSTER MYID]
+ R $id CLUSTER RESET HARD
+ # Here we additionally test that CLUSTER FORGET propagates to all nodes.
+ set other_id [expr $id == 0 ? 1 : 0]
+ R $other_id CLUSTER FORGET $node_id
+ wait_for_condition 50 100 {
+ [node_is_forgotten $node_id]
+ } else {
+ fail "CLUSTER FORGET was not propagated to all nodes"
+ }
+}
+
+# Check if cluster's view of hostnames is consistent
+proc are_hostnames_propagated {match_string} {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ set cfg [R $j cluster slots]
+ foreach node $cfg {
+ for {set i 2} {$i < [llength $node]} {incr i} {
+ if {! [string match $match_string [lindex [lindex [lindex $node $i] 3] 1]] } {
+ return 0
+ }
+ }
+ }
+ }
+ return 1
+}
diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl
new file mode 100644
index 0000000..53fa9fe
--- /dev/null
+++ b/tests/support/redis.tcl
@@ -0,0 +1,466 @@
+# Tcl client library - used by the Redis test
+# Copyright (C) 2009-2014 Salvatore Sanfilippo
+# Released under the BSD license like Redis itself
+#
+# Example usage:
+#
+# set r [redis 127.0.0.1 6379]
+# $r lpush mylist foo
+# $r lpush mylist bar
+# $r lrange mylist 0 -1
+# $r close
+#
+# Non blocking usage example:
+#
+# proc handlePong {r type reply} {
+# puts "PONG $type '$reply'"
+# if {$reply ne "PONG"} {
+# $r ping [list handlePong]
+# }
+# }
+#
+# set r [redis]
+# $r blocking 0
+# $r get fo [list handlePong]
+#
+# vwait forever
+
+package require Tcl 8.5
+package provide redis 0.1
+
+source [file join [file dirname [info script]] "response_transformers.tcl"]
+
+namespace eval redis {}
+set ::redis::id 0
+array set ::redis::fd {}
+array set ::redis::addr {}
+array set ::redis::blocking {}
+array set ::redis::deferred {}
+array set ::redis::readraw {}
+array set ::redis::attributes {} ;# Holds the RESP3 attributes from the last call
+array set ::redis::reconnect {}
+array set ::redis::tls {}
+array set ::redis::callback {}
+array set ::redis::state {} ;# State in non-blocking reply reading
+array set ::redis::statestack {} ;# Stack of states, for nested mbulks
+array set ::redis::curr_argv {} ;# Remember the current argv, to be used in response_transformers.tcl
+array set ::redis::testing_resp3 {} ;# Indicating if the current client is using RESP3 (only if the test is trying to test RESP3 specific behavior. It won't be on in case of force_resp3)
+
+set ::force_resp3 0
+set ::log_req_res 0
+
+proc redis {{server 127.0.0.1} {port 6379} {defer 0} {tls 0} {tlsoptions {}} {readraw 0}} {
+ if {$tls} {
+ package require tls
+ ::tls::init \
+ -cafile "$::tlsdir/ca.crt" \
+ -certfile "$::tlsdir/client.crt" \
+ -keyfile "$::tlsdir/client.key" \
+ {*}$tlsoptions
+ set fd [::tls::socket $server $port]
+ } else {
+ set fd [socket $server $port]
+ }
+ fconfigure $fd -translation binary
+ set id [incr ::redis::id]
+ set ::redis::fd($id) $fd
+ set ::redis::addr($id) [list $server $port]
+ set ::redis::blocking($id) 1
+ set ::redis::deferred($id) $defer
+ set ::redis::readraw($id) $readraw
+ set ::redis::reconnect($id) 0
+ set ::redis::curr_argv($id) 0
+ set ::redis::testing_resp3($id) 0
+ set ::redis::tls($id) $tls
+ ::redis::redis_reset_state $id
+ interp alias {} ::redis::redisHandle$id {} ::redis::__dispatch__ $id
+}
+
+# On recent versions of tcl-tls/OpenSSL, reading from a dropped connection
+# results in an error that we need to catch in order to mimic the old behavior.
+proc ::redis::redis_safe_read {fd len} {
+ if {$len == -1} {
+ set err [catch {set val [read $fd]} msg]
+ } else {
+ set err [catch {set val [read $fd $len]} msg]
+ }
+ if {!$err} {
+ return $val
+ }
+ if {[string match "*connection abort*" $msg]} {
+ return {}
+ }
+ error $msg
+}
+
+proc ::redis::redis_safe_gets {fd} {
+ if {[catch {set val [gets $fd]} msg]} {
+ if {[string match "*connection abort*" $msg]} {
+ return {}
+ }
+ error $msg
+ }
+ return $val
+}
+
+# This is a wrapper to the actual dispatching procedure that handles
+# reconnection if needed.
+proc ::redis::__dispatch__ {id method args} {
+ set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval]
+ if {$errorcode && $::redis::reconnect($id) && $::redis::fd($id) eq {}} {
+ # Try again if the connection was lost.
+ # FIXME: we don't re-select the previously selected DB, nor do we check
+ # if we are inside a transaction that needs to be re-issued from
+ # scratch.
+ set errorcode [catch {::redis::__dispatch__raw__ $id $method $args} retval]
+ }
+ return -code $errorcode $retval
+}
+
+proc ::redis::__dispatch__raw__ {id method argv} {
+ set fd $::redis::fd($id)
+
+ # Reconnect the link if needed.
+ if {$fd eq {} && $method ne {close}} {
+ lassign $::redis::addr($id) host port
+ if {$::redis::tls($id)} {
+ set ::redis::fd($id) [::tls::socket $host $port]
+ } else {
+ set ::redis::fd($id) [socket $host $port]
+ }
+ fconfigure $::redis::fd($id) -translation binary
+ set fd $::redis::fd($id)
+ }
+
+ # Transform HELLO 2 to HELLO 3 if force_resp3 is set.
+ # Also set the connection var testing_resp3 in case of HELLO 3.
+ if {[llength $argv] > 0 && [string compare -nocase $method "HELLO"] == 0} {
+ if {[lindex $argv 0] == 3} {
+ set ::redis::testing_resp3($id) 1
+ } else {
+ set ::redis::testing_resp3($id) 0
+ if {$::force_resp3} {
+ # If we are in force_resp3 we run HELLO 3 instead of HELLO 2
+ lset argv 0 3
+ }
+ }
+ }
+
+ set blocking $::redis::blocking($id)
+ set deferred $::redis::deferred($id)
+ if {$blocking == 0} {
+ if {[llength $argv] == 0} {
+ error "Please provide a callback in non-blocking mode"
+ }
+ set callback [lindex $argv end]
+ set argv [lrange $argv 0 end-1]
+ }
+ if {[info command ::redis::__method__$method] eq {}} {
+ catch {unset ::redis::attributes($id)}
+ set cmd "*[expr {[llength $argv]+1}]\r\n"
+ append cmd "$[string length $method]\r\n$method\r\n"
+ foreach a $argv {
+ append cmd "$[string length $a]\r\n$a\r\n"
+ }
+ ::redis::redis_write $fd $cmd
+ if {[catch {flush $fd}]} {
+ catch {close $fd}
+ set ::redis::fd($id) {}
+ return -code error "I/O error writing request"
+ }
+
+ set ::redis::curr_argv($id) [concat $method $argv]
+ if {!$deferred} {
+ if {$blocking} {
+ ::redis::redis_read_reply $id $fd
+ } else {
+ # Every well-formed reply that is read will pop an element from this
+ # list and use it as a callback, so pipelining is supported in
+ # non-blocking mode.
+ lappend ::redis::callback($id) $callback
+ fileevent $fd readable [list ::redis::redis_readable $fd $id]
+ }
+ }
+ } else {
+ uplevel 1 [list ::redis::__method__$method $id $fd] $argv
+ }
+}
+
+proc ::redis::__method__blocking {id fd val} {
+ set ::redis::blocking($id) $val
+ fconfigure $fd -blocking $val
+}
+
+proc ::redis::__method__reconnect {id fd val} {
+ set ::redis::reconnect($id) $val
+}
+
+proc ::redis::__method__read {id fd} {
+ ::redis::redis_read_reply $id $fd
+}
+
+proc ::redis::__method__rawread {id fd {len -1}} {
+ return [redis_safe_read $fd $len]
+}
+
+proc ::redis::__method__write {id fd buf} {
+ ::redis::redis_write $fd $buf
+}
+
+proc ::redis::__method__flush {id fd} {
+ flush $fd
+}
+
+proc ::redis::__method__close {id fd} {
+ catch {close $fd}
+ catch {unset ::redis::fd($id)}
+ catch {unset ::redis::addr($id)}
+ catch {unset ::redis::blocking($id)}
+ catch {unset ::redis::deferred($id)}
+ catch {unset ::redis::readraw($id)}
+ catch {unset ::redis::attributes($id)}
+ catch {unset ::redis::reconnect($id)}
+ catch {unset ::redis::tls($id)}
+ catch {unset ::redis::state($id)}
+ catch {unset ::redis::statestack($id)}
+ catch {unset ::redis::callback($id)}
+ catch {unset ::redis::curr_argv($id)}
+ catch {unset ::redis::testing_resp3($id)}
+ catch {interp alias {} ::redis::redisHandle$id {}}
+}
+
+proc ::redis::__method__channel {id fd} {
+ return $fd
+}
+
+proc ::redis::__method__deferred {id fd val} {
+ set ::redis::deferred($id) $val
+}
+
+proc ::redis::__method__readraw {id fd val} {
+ set ::redis::readraw($id) $val
+}
+
+proc ::redis::__method__readingraw {id fd} {
+ return $::redis::readraw($id)
+}
+
+proc ::redis::__method__attributes {id fd} {
+ set _ $::redis::attributes($id)
+}
+
+proc ::redis::redis_write {fd buf} {
+ puts -nonewline $fd $buf
+}
+
+proc ::redis::redis_writenl {fd buf} {
+ redis_write $fd $buf
+ redis_write $fd "\r\n"
+ flush $fd
+}
+
+proc ::redis::redis_readnl {fd len} {
+ set buf [redis_safe_read $fd $len]
+ redis_safe_read $fd 2 ; # discard CR LF
+ return $buf
+}
+
+proc ::redis::redis_bulk_read {fd} {
+ set count [redis_read_line $fd]
+ if {$count == -1} return {}
+ set buf [redis_readnl $fd $count]
+ return $buf
+}
+
+proc ::redis::redis_multi_bulk_read {id fd} {
+ set count [redis_read_line $fd]
+ if {$count == -1} return {}
+ set l {}
+ set err {}
+ for {set i 0} {$i < $count} {incr i} {
+ if {[catch {
+ lappend l [redis_read_reply_logic $id $fd]
+ } e] && $err eq {}} {
+ set err $e
+ }
+ }
+ if {$err ne {}} {return -code error $err}
+ return $l
+}
+
+proc ::redis::redis_read_map {id fd} {
+ set count [redis_read_line $fd]
+ if {$count == -1} return {}
+ set d {}
+ set err {}
+ for {set i 0} {$i < $count} {incr i} {
+ if {[catch {
+ set k [redis_read_reply_logic $id $fd] ; # key
+ set v [redis_read_reply_logic $id $fd] ; # value
+ dict set d $k $v
+ } e] && $err eq {}} {
+ set err $e
+ }
+ }
+ if {$err ne {}} {return -code error $err}
+ return $d
+}
+
+proc ::redis::redis_read_line fd {
+ string trim [redis_safe_gets $fd]
+}
+
+proc ::redis::redis_read_null fd {
+ redis_safe_gets $fd
+ return {}
+}
+
+proc ::redis::redis_read_bool fd {
+ set v [redis_read_line $fd]
+ if {$v == "t"} {return 1}
+ if {$v == "f"} {return 0}
+ return -code error "Bad protocol, '$v' as bool type"
+}
+
+proc ::redis::redis_read_double {id fd} {
+ set v [redis_read_line $fd]
+ # Unlike many other data types, there is a textual difference between a double
+ # and a string with the same value, so we only convert to a double when actually
+ # testing RESP3 (i.e. some tests check that a double reply is "1.0" and not "1").
+ if {[should_transform_to_resp2 $id]} {
+ return $v
+ } else {
+ return [expr {double($v)}]
+ }
+}
+
+proc ::redis::redis_read_verbatim_str fd {
+ set v [redis_bulk_read $fd]
+ # strip the first 4 chars ("txt:")
+ return [string range $v 4 end]
+}
+
+proc ::redis::redis_read_reply_logic {id fd} {
+ if {$::redis::readraw($id)} {
+ return [redis_read_line $fd]
+ }
+
+ while {1} {
+ set type [redis_safe_read $fd 1]
+ switch -exact -- $type {
+ _ {return [redis_read_null $fd]}
+ : -
+ ( -
+ + {return [redis_read_line $fd]}
+ , {return [redis_read_double $id $fd]}
+ # {return [redis_read_bool $fd]}
+ = {return [redis_read_verbatim_str $fd]}
+ - {return -code error [redis_read_line $fd]}
+ $ {return [redis_bulk_read $fd]}
+ > -
+ ~ -
+ * {return [redis_multi_bulk_read $id $fd]}
+ % {return [redis_read_map $id $fd]}
+ | {
+ set attrib [redis_read_map $id $fd]
+ set ::redis::attributes($id) $attrib
+ continue
+ }
+ default {
+ if {$type eq {}} {
+ catch {close $fd}
+ set ::redis::fd($id) {}
+ return -code error "I/O error reading reply"
+ }
+ return -code error "Bad protocol, '$type' as reply type byte"
+ }
+ }
+ }
+}
+
+proc ::redis::redis_read_reply {id fd} {
+ set response [redis_read_reply_logic $id $fd]
+ ::response_transformers::transform_response_if_needed $id $::redis::curr_argv($id) $response
+}
+
+proc ::redis::redis_reset_state id {
+ set ::redis::state($id) [dict create buf {} mbulk -1 bulk -1 reply {}]
+ set ::redis::statestack($id) {}
+}
+
+proc ::redis::redis_call_callback {id type reply} {
+ set cb [lindex $::redis::callback($id) 0]
+ set ::redis::callback($id) [lrange $::redis::callback($id) 1 end]
+ uplevel #0 $cb [list ::redis::redisHandle$id $type $reply]
+ ::redis::redis_reset_state $id
+}
+
+# Read a reply in non-blocking mode.
+proc ::redis::redis_readable {fd id} {
+ if {[eof $fd]} {
+ redis_call_callback $id eof {}
+ ::redis::__method__close $id $fd
+ return
+ }
+ if {[dict get $::redis::state($id) bulk] == -1} {
+ set line [gets $fd]
+ if {$line eq {}} return ;# No complete line available, return
+ switch -exact -- [string index $line 0] {
+ : -
+ + {redis_call_callback $id reply [string range $line 1 end-1]}
+ - {redis_call_callback $id err [string range $line 1 end-1]}
+ ( {redis_call_callback $id reply [string range $line 1 end-1]}
+ $ {
+ dict set ::redis::state($id) bulk \
+ [expr [string range $line 1 end-1]+2]
+ if {[dict get $::redis::state($id) bulk] == 1} {
+ # We got a $-1, hack the state to play well with this.
+ dict set ::redis::state($id) bulk 2
+ dict set ::redis::state($id) buf "\r\n"
+ ::redis::redis_readable $fd $id
+ }
+ }
+ * {
+ dict set ::redis::state($id) mbulk [string range $line 1 end-1]
+ # Handle *-1
+ if {[dict get $::redis::state($id) mbulk] == -1} {
+ redis_call_callback $id reply {}
+ }
+ }
+ default {
+ redis_call_callback $id err \
+ "Bad protocol, '[string index $line 0]' as reply type byte"
+ }
+ }
+ } else {
+ set totlen [dict get $::redis::state($id) bulk]
+ set buflen [string length [dict get $::redis::state($id) buf]]
+ set toread [expr {$totlen-$buflen}]
+ set data [read $fd $toread]
+ set nread [string length $data]
+ dict append ::redis::state($id) buf $data
+ # Check if we read a complete bulk reply
+ if {[string length [dict get $::redis::state($id) buf]] ==
+ [dict get $::redis::state($id) bulk]} {
+ if {[dict get $::redis::state($id) mbulk] == -1} {
+ redis_call_callback $id reply \
+ [string range [dict get $::redis::state($id) buf] 0 end-2]
+ } else {
+ dict with ::redis::state($id) {
+ lappend reply [string range $buf 0 end-2]
+ incr mbulk -1
+ set bulk -1
+ }
+ if {[dict get $::redis::state($id) mbulk] == 0} {
+ redis_call_callback $id reply \
+ [dict get $::redis::state($id) reply]
+ }
+ }
+ }
+ }
+}
+
+# When forcing RESP3, some tests that rely on RESP2 replies can fail, so we
+# have to translate the RESP3 response back to RESP2.
+proc ::redis::should_transform_to_resp2 {id} {
+ return [expr {$::force_resp3 && !$::redis::testing_resp3($id)}]
+}
diff --git a/tests/support/response_transformers.tcl b/tests/support/response_transformers.tcl
new file mode 100644
index 0000000..45b3cf8
--- /dev/null
+++ b/tests/support/response_transformers.tcl
@@ -0,0 +1,105 @@
+# Tcl client library - used by the Redis test
+# Copyright (C) 2009-2023 Redis Ltd.
+# Released under the BSD license like Redis itself
+#
+# This file contains a bunch of commands whose purpose is to transform
+# a RESP3 response to RESP2
+# Why is it needed?
+# When writing the reply_schema part in COMMAND DOCS we decided to use
+# the existing tests in order to verify the schemas (see logreqres.c)
+# The problem was that many tests were relying on the RESP2 structure
+# of the response (e.g. HRANDFIELD WITHVALUES in RESP2: {f1 v1 f2 v2}
+# vs. RESP3: {{f1 v1} {f2 v2}}).
+# Instead of adjusting the tests to expect RESP3 responses (a lot of
+# changes in many files) we decided to transform the response to RESP2
+# when running with --force-resp3
+
+package require Tcl 8.5
+
+namespace eval response_transformers {}
+
+# Transform a map response into an array of tuples (tuple = array with 2 elements)
+# Used for XREAD[GROUP]
+proc transfrom_map_to_tupple_array {argv response} {
+ set tuparray {}
+ foreach {key val} $response {
+ set tmp {}
+ lappend tmp $key
+ lappend tmp $val
+ lappend tuparray $tmp
+ }
+ return $tuparray
+}
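+
+# For example (illustrative): the RESP3 map {stream1 {entries1} stream2 {entries2}}
+# becomes {{stream1 {entries1}} {stream2 {entries2}}}.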
+
+# Transform an array of tuples to a flat array
+proc transfrom_tuple_array_to_flat_array {argv response} {
+ set flatarray {}
+ foreach pair $response {
+ lappend flatarray {*}$pair
+ }
+ return $flatarray
+}
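+
+# For example (illustrative): {{f1 v1} {f2 v2}} becomes {f1 v1 f2 v2}, which
+# is the flat shape RESP2 clients expect.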
+
+# With HRANDFIELD, we only need to transform the response if the request had WITHVALUES
+# (otherwise the returned response is a flat array in both RESPs)
+proc transfrom_hrandfield_command {argv response} {
+ foreach ele $argv {
+ if {[string compare -nocase $ele "WITHVALUES"] == 0} {
+ return [transfrom_tuple_array_to_flat_array $argv $response]
+ }
+ }
+ return $response
+}
+
+# With some zset commands, we only need to transform the response if the request had WITHSCORES
+# (otherwise the returned response is a flat array in both RESPs)
+proc transfrom_zset_withscores_command {argv response} {
+ foreach ele $argv {
+ if {[string compare -nocase $ele "WITHSCORES"] == 0} {
+ return [transfrom_tuple_array_to_flat_array $argv $response]
+ }
+ }
+ return $response
+}
+
+# With ZPOPMIN/ZPOPMAX, we only need to transform the response if the request had COUNT (3rd arg)
+# (otherwise the returned response is a flat array in both RESPs)
+proc transfrom_zpopmin_zpopmax {argv response} {
+ if {[llength $argv] == 3} {
+ return [transfrom_tuple_array_to_flat_array $argv $response]
+ }
+ return $response
+}
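+
+# For example (illustrative): for {ZPOPMIN key 2}, the RESP3 reply
+# {{m1 1} {m2 2}} is flattened back to the RESP2 form {m1 1 m2 2}.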
+
+set ::trasformer_funcs {
+ XREAD transfrom_map_to_tupple_array
+ XREADGROUP transfrom_map_to_tupple_array
+ HRANDFIELD transfrom_hrandfield_command
+ ZRANDMEMBER transfrom_zset_withscores_command
+ ZRANGE transfrom_zset_withscores_command
+ ZRANGEBYSCORE transfrom_zset_withscores_command
+ ZRANGEBYLEX transfrom_zset_withscores_command
+ ZREVRANGE transfrom_zset_withscores_command
+ ZREVRANGEBYSCORE transfrom_zset_withscores_command
+ ZREVRANGEBYLEX transfrom_zset_withscores_command
+ ZUNION transfrom_zset_withscores_command
+ ZDIFF transfrom_zset_withscores_command
+ ZINTER transfrom_zset_withscores_command
+ ZPOPMIN transfrom_zpopmin_zpopmax
+ ZPOPMAX transfrom_zpopmin_zpopmax
+}
+
+proc ::response_transformers::transform_response_if_needed {id argv response} {
+ if {![::redis::should_transform_to_resp2 $id] || $::redis::readraw($id)} {
+ return $response
+ }
+
+ set key [string toupper [lindex $argv 0]]
+ if {![dict exists $::trasformer_funcs $key]} {
+ return $response
+ }
+
+ set transform [dict get $::trasformer_funcs $key]
+
+ return [$transform $argv $response]
+}
diff --git a/tests/support/server.tcl b/tests/support/server.tcl
new file mode 100644
index 0000000..67979e5
--- /dev/null
+++ b/tests/support/server.tcl
@@ -0,0 +1,789 @@
+set ::global_overrides {}
+set ::tags {}
+set ::valgrind_errors {}
+
+proc start_server_error {config_file error} {
+ set err {}
+ append err "Can't start the Redis server\n"
+ append err "CONFIGURATION:"
+ append err [exec cat $config_file]
+ append err "\nERROR:"
+ append err [string trim $error]
+ send_data_packet $::test_server_fd err $err
+}
+
+proc check_valgrind_errors stderr {
+ set res [find_valgrind_errors $stderr true]
+ if {$res != ""} {
+ send_data_packet $::test_server_fd err "Valgrind error: $res\n"
+ }
+}
+
+proc check_sanitizer_errors stderr {
+ set res [sanitizer_errors_from_file $stderr]
+ if {$res != ""} {
+ send_data_packet $::test_server_fd err "Sanitizer error: $res\n"
+ }
+}
+
+proc clean_persistence config {
+ # we may wanna keep the logs for later, but let's clean the persistence
+ # files right away, since they can accumulate and take up a lot of space
+ set config [dict get $config "config"]
+ set dir [dict get $config "dir"]
+ set rdb [format "%s/%s" $dir "dump.rdb"]
+ if {[dict exists $config "appenddirname"]} {
+ set aofdir [dict get $config "appenddirname"]
+ } else {
+ set aofdir "appendonlydir"
+ }
+ set aof_dirpath [format "%s/%s" $dir $aofdir]
+ clean_aof_persistence $aof_dirpath
+ catch {exec rm -rf $rdb}
+}
+
+proc kill_server config {
+ # nothing to kill when running against external server
+ if {$::external} return
+
+ # Close client connection if exists
+ if {[dict exists $config "client"]} {
+ [dict get $config "client"] close
+ }
+
+ # never mind if it's already dead
+ if {![is_alive $config]} {
+ # Check valgrind errors if needed
+ if {$::valgrind} {
+ check_valgrind_errors [dict get $config stderr]
+ }
+
+ check_sanitizer_errors [dict get $config stderr]
+ return
+ }
+ set pid [dict get $config pid]
+
+ # check for leaks
+ if {![dict exists $config "skipleaks"]} {
+ catch {
+ if {[string match {*Darwin*} [exec uname -a]]} {
+ tags {"leaks"} {
+ test "Check for memory leaks (pid $pid)" {
+ set output {0 leaks}
+ catch {exec leaks $pid} output option
+ # In a few tests we kill the server process, so leaks will not find it.
+ # leaks exits with an exit code >1 on error, so we ignore these cases.
+ if {[dict exists $option -errorcode]} {
+ set details [dict get $option -errorcode]
+ if {[lindex $details 0] eq "CHILDSTATUS"} {
+ set status [lindex $details 2]
+ if {$status > 1} {
+ set output "0 leaks"
+ }
+ }
+ }
+ set output
+ } {*0 leaks*}
+ }
+ }
+ }
+ }
+
+ # kill the server and wait for the process to fully exit
+ send_data_packet $::test_server_fd server-killing $pid
+ catch {exec kill $pid}
+ # Node might have been stopped in the test
+ catch {exec kill -SIGCONT $pid}
+ if {$::valgrind} {
+ set max_wait 120000
+ } else {
+ set max_wait 10000
+ }
+ while {[is_alive $config]} {
+ incr wait 10
+
+ if {$wait == $max_wait} {
+ puts "Forcing process $pid to crash..."
+ catch {exec kill -SEGV $pid}
+ } elseif {$wait >= $max_wait * 2} {
+ puts "Forcing process $pid to exit..."
+ catch {exec kill -KILL $pid}
+ } elseif {$wait % 1000 == 0} {
+ puts "Waiting for process $pid to exit..."
+ }
+ after 10
+ }
+
+ # Check valgrind errors if needed
+ if {$::valgrind} {
+ check_valgrind_errors [dict get $config stderr]
+ }
+
+ check_sanitizer_errors [dict get $config stderr]
+
+ # Remove this pid from the set of active pids in the test server.
+ send_data_packet $::test_server_fd server-killed $pid
+}
+
+proc is_alive config {
+ set pid [dict get $config pid]
+ if {[catch {exec kill -0 $pid} err]} {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc ping_server {host port} {
+ set retval 0
+ if {[catch {
+ if {$::tls} {
+ set fd [::tls::socket $host $port]
+ } else {
+ set fd [socket $host $port]
+ }
+ fconfigure $fd -translation binary
+ puts $fd "PING\r\n"
+ flush $fd
+ set reply [gets $fd]
+ if {[string range $reply 0 0] eq {+} ||
+ [string range $reply 0 0] eq {-}} {
+ set retval 1
+ }
+ close $fd
+ } e]} {
+ if {$::verbose} {
+ puts -nonewline "."
+ }
+ } else {
+ if {$::verbose} {
+ puts -nonewline "ok"
+ }
+ }
+ return $retval
+}
+
+# Returns 1 if the server at the specified address is reachable by PING,
+# otherwise returns 0. Retries every 50 milliseconds for the specified
+# number of retries.
+proc server_is_up {host port retrynum} {
+ after 10 ;# Use a small delay to make a first-try success more likely.
+ set retval 0
+ while {[incr retrynum -1]} {
+ if {[catch {ping_server $host $port} ping]} {
+ set ping 0
+ }
+ if {$ping} {return 1}
+ after 50
+ }
+ return 0
+}
+
+# Check if current ::tags match requested tags. If ::allowtags are used,
+# there must be some intersection. If ::denytags are used, no intersection
+# is allowed. Returns 1 if tags are acceptable or 0 otherwise, in which
+# case err_return names a return variable for the message to be logged.
+proc tags_acceptable {tags err_return} {
+ upvar $err_return err
+
+ # If tags are whitelisted, make sure there's a match
+ if {[llength $::allowtags] > 0} {
+ set matched 0
+ foreach tag $::allowtags {
+ if {[lsearch $tags $tag] >= 0} {
+ incr matched
+ }
+ }
+ if {$matched < 1} {
+ set err "Tag: none of the tags allowed"
+ return 0
+ }
+ }
+
+ foreach tag $::denytags {
+ if {[lsearch $tags $tag] >= 0} {
+ set err "Tag: $tag denied"
+ return 0
+ }
+ }
+
+ # some units mess with the client output buffer so we can't really use the req-res logging mechanism.
+ if {$::log_req_res && [lsearch $tags "logreqres:skip"] >= 0} {
+ set err "Not supported when running in log-req-res mode"
+ return 0
+ }
+
+ if {$::external && [lsearch $tags "external:skip"] >= 0} {
+ set err "Not supported on external server"
+ return 0
+ }
+
+ if {$::singledb && [lsearch $tags "singledb:skip"] >= 0} {
+ set err "Not supported on singledb"
+ return 0
+ }
+
+ if {$::cluster_mode && [lsearch $tags "cluster:skip"] >= 0} {
+ set err "Not supported in cluster mode"
+ return 0
+ }
+
+ if {$::tls && [lsearch $tags "tls:skip"] >= 0} {
+ set err "Not supported in tls mode"
+ return 0
+ }
+
+ if {!$::large_memory && [lsearch $tags "large-memory"] >= 0} {
+ set err "large memory flag not provided"
+ return 0
+ }
+
+ return 1
+}
+
+# doesn't really belong here, but highly coupled to code in start_server
+proc tags {tags code} {
+ # If 'tags' contains multiple tags, quoted and separated by spaces,
+ # we want to get rid of the quotes in order to have a proper list
+ set tags [string map { \" "" } $tags]
+ set ::tags [concat $::tags $tags]
+ if {![tags_acceptable $::tags err]} {
+ incr ::num_aborted
+ send_data_packet $::test_server_fd ignore $err
+ set ::tags [lrange $::tags 0 end-[llength $tags]]
+ return
+ }
+ uplevel 1 $code
+ set ::tags [lrange $::tags 0 end-[llength $tags]]
+}
+
+# Write the configuration in the dictionary 'config' in the specified
+# file name.
+proc create_server_config_file {filename config config_lines} {
+ set fp [open $filename w+]
+ foreach directive [dict keys $config] {
+ puts -nonewline $fp "$directive "
+ puts $fp [dict get $config $directive]
+ }
+ foreach {config_line_directive config_line_args} $config_lines {
+ puts $fp "$config_line_directive $config_line_args"
+ }
+ close $fp
+}
+
+proc spawn_server {config_file stdout stderr args} {
+ set cmd [list src/redis-server $config_file]
+ set args {*}$args
+ if {[llength $args] > 0} {
+ lappend cmd {*}$args
+ }
+
+ if {$::valgrind} {
+ set pid [exec valgrind --track-origins=yes --trace-children=yes --suppressions=[pwd]/src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full {*}$cmd >> $stdout 2>> $stderr &]
+ } elseif {$::stack_logging} {
+ set pid [exec /usr/bin/env MallocStackLogging=1 MallocLogFile=/tmp/malloc_log.txt {*}$cmd >> $stdout 2>> $stderr &]
+ } else {
+ # ASAN_OPTIONS environment variable is for address sanitizer. If a test
+ # tries to allocate huge memory area and expects allocator to return
+ # NULL, address sanitizer throws an error without this setting.
+ set pid [exec /usr/bin/env ASAN_OPTIONS=allocator_may_return_null=1 {*}$cmd >> $stdout 2>> $stderr &]
+ }
+
+ if {$::wait_server} {
+ set msg "server started PID: $pid. press any key to continue..."
+ puts $msg
+ read stdin 1
+ }
+
+ # Tell the test server about this new instance.
+ send_data_packet $::test_server_fd server-spawned $pid
+ return $pid
+}
+
+# Wait for actual startup, return 1 if port is busy, 0 otherwise
+proc wait_server_started {config_file stdout pid} {
+ set checkperiod 100; # Milliseconds
+ set maxiter [expr {120*1000/$checkperiod}] ; # Wait up to 2 minutes.
+ set port_busy 0
+ while 1 {
+ if {[regexp -- " PID: $pid.*Server initialized" [exec cat $stdout]]} {
+ break
+ }
+ after $checkperiod
+ incr maxiter -1
+ if {$maxiter == 0} {
+ start_server_error $config_file "No PID detected in log $stdout"
+ puts "--- LOG CONTENT ---"
+ puts [exec cat $stdout]
+ puts "-------------------"
+ break
+ }
+
+ # Check if the port is actually busy and the server failed
+ # for this reason.
+ if {[regexp {Failed listening on port} [exec cat $stdout]]} {
+ set port_busy 1
+ break
+ }
+ }
+ return $port_busy
+}
+
+proc dump_server_log {srv} {
+ set pid [dict get $srv "pid"]
+ puts "\n===== Start of server log (pid $pid) =====\n"
+ puts [exec cat [dict get $srv "stdout"]]
+ puts "===== End of server log (pid $pid) =====\n"
+
+ puts "\n===== Start of server stderr log (pid $pid) =====\n"
+ puts [exec cat [dict get $srv "stderr"]]
+ puts "===== End of server stderr log (pid $pid) =====\n"
+}
+
+proc run_external_server_test {code overrides} {
+ set srv {}
+ dict set srv "host" $::host
+ dict set srv "port" $::port
+ set client [redis $::host $::port 0 $::tls]
+ dict set srv "client" $client
+ if {!$::singledb} {
+ $client select 9
+ }
+
+ set config {}
+ dict set config "port" $::port
+ dict set srv "config" $config
+
+ # append the server to the stack
+ lappend ::servers $srv
+
+ if {[llength $::servers] > 1} {
+ if {$::verbose} {
+ puts "Notice: nested start_server statements in external server mode, test must be aware of that!"
+ }
+ }
+
+ r flushall
+ r function flush
+
+ # store overrides
+ set saved_config {}
+ foreach {param val} $overrides {
+ dict set saved_config $param [lindex [r config get $param] 1]
+ r config set $param $val
+
+ # If we enable appendonly, wait for the rewrite to complete. This is
+ # required for tests that begin with a bg* command, which will fail if
+ # the rewriteaof operation is not completed at this point.
+ if {$param == "appendonly" && $val == "yes"} {
+ waitForBgrewriteaof r
+ }
+ }
+
+ if {[catch {set retval [uplevel 2 $code]} error]} {
+ if {$::durable} {
+ set msg [string range $error 10 end]
+ lappend details $msg
+ lappend details $::errorInfo
+ lappend ::tests_failed $details
+
+ incr ::num_failed
+ send_data_packet $::test_server_fd err [join $details "\n"]
+ } else {
+ # Re-raise, let handler up the stack take care of this.
+ error $error $::errorInfo
+ }
+ }
+
+ # restore overrides
+ dict for {param val} $saved_config {
+ r config set $param $val
+ }
+
+ set srv [lpop ::servers]
+
+ if {[dict exists $srv "client"]} {
+ [dict get $srv "client"] close
+ }
+}
+
+proc start_server {options {code undefined}} {
+ # setup defaults
+ set baseconfig "default.conf"
+ set overrides {}
+ set omit {}
+ set tags {}
+ set args {}
+ set keep_persistence false
+ set config_lines {}
+
+ # parse options
+ foreach {option value} $options {
+ switch $option {
+ "config" {
+ set baseconfig $value
+ }
+ "overrides" {
+ set overrides [concat $overrides $value]
+ }
+ "config_lines" {
+ set config_lines $value
+ }
+ "args" {
+ set args $value
+ }
+ "omit" {
+ set omit $value
+ }
+ "tags" {
+ # If 'tags' contains multiple tags, quoted and separated by spaces,
+ # we want to get rid of the quotes in order to have a proper list
+ set tags [string map { \" "" } $value]
+ set ::tags [concat $::tags $tags]
+ }
+ "keep_persistence" {
+ set keep_persistence $value
+ }
+ default {
+ error "Unknown option $option"
+ }
+ }
+ }
+
+ # We skip unwanted tags
+ if {![tags_acceptable $::tags err]} {
+ incr ::num_aborted
+ send_data_packet $::test_server_fd ignore $err
+ set ::tags [lrange $::tags 0 end-[llength $tags]]
+ return
+ }
+
+ # If we are running against an external server, we just push the
+ # host/port pair in the stack the first time
+ if {$::external} {
+ run_external_server_test $code $overrides
+
+ set ::tags [lrange $::tags 0 end-[llength $tags]]
+ return
+ }
+
+ set data [split [exec cat "tests/assets/$baseconfig"] "\n"]
+ set config {}
+ if {$::tls} {
+ if {$::tls_module} {
+ lappend config_lines [list "loadmodule" [format "%s/src/redis-tls.so" [pwd]]]
+ }
+ dict set config "tls-cert-file" [format "%s/tests/tls/server.crt" [pwd]]
+ dict set config "tls-key-file" [format "%s/tests/tls/server.key" [pwd]]
+ dict set config "tls-client-cert-file" [format "%s/tests/tls/client.crt" [pwd]]
+ dict set config "tls-client-key-file" [format "%s/tests/tls/client.key" [pwd]]
+ dict set config "tls-dh-params-file" [format "%s/tests/tls/redis.dh" [pwd]]
+ dict set config "tls-ca-cert-file" [format "%s/tests/tls/ca.crt" [pwd]]
+ dict set config "loglevel" "debug"
+ }
+ foreach line $data {
+ if {[string length $line] > 0 && [string index $line 0] ne "#"} {
+ set elements [split $line " "]
+ set directive [lrange $elements 0 0]
+ set arguments [lrange $elements 1 end]
+ dict set config $directive $arguments
+ }
+ }
+
+ # use a different directory every time a server is started
+ dict set config dir [tmpdir server]
+
+ # start every server on a different port
+ set port [find_available_port $::baseport $::portcount]
+ if {$::tls} {
+ set pport [find_available_port $::baseport $::portcount]
+ dict set config "port" $pport
+ dict set config "tls-port" $port
+ dict set config "tls-cluster" "yes"
+ dict set config "tls-replication" "yes"
+ } else {
+ dict set config port $port
+ }
+
+ set unixsocket [file normalize [format "%s/%s" [dict get $config "dir"] "socket"]]
+ dict set config "unixsocket" $unixsocket
+
+ # apply overrides from global space and arguments
+ foreach {directive arguments} [concat $::global_overrides $overrides] {
+ dict set config $directive $arguments
+ }
+
+ # remove directives that are marked to be omitted
+ foreach directive $omit {
+ dict unset config $directive
+ }
+
+ if {$::log_req_res} {
+ dict set config "req-res-logfile" "stdout.reqres"
+ }
+
+ if {$::force_resp3} {
+ dict set config "client-default-resp" "3"
+ }
+
+ # write new configuration to temporary file
+ set config_file [tmpfile redis.conf]
+ create_server_config_file $config_file $config $config_lines
+
+ set stdout [format "%s/%s" [dict get $config "dir"] "stdout"]
+ set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
+
+ # if we're inside a test, write the test name to the server log file
+ if {[info exists ::cur_test]} {
+ set fd [open $stdout "a+"]
+ puts $fd "### Starting server for test $::cur_test"
+ close $fd
+ if {$::verbose > 1} {
+ puts "### Starting server $stdout for test - $::cur_test"
+ }
+ }
+
+ # We may have a stdout left over from the previous tests, so we need
+ # to get the current count of ready logs
+ set previous_ready_count [count_message_lines $stdout "Ready to accept"]
+
+ # We need a loop here to retry with different ports.
+ set server_started 0
+ while {$server_started == 0} {
+ if {$::verbose} {
+ puts -nonewline "=== ($tags) Starting server ${::host}:${port} "
+ }
+
+ send_data_packet $::test_server_fd "server-spawning" "port $port"
+
+ set pid [spawn_server $config_file $stdout $stderr $args]
+
+ # check that the server actually started
+ set port_busy [wait_server_started $config_file $stdout $pid]
+
+ # Sometimes we have to try a different port, even if we checked
+ # for availability. Other test clients may grab the port before we
+ # are able to, for example.
+ if {$port_busy} {
+ puts "Port $port was already busy, trying another port..."
+ set port [find_available_port $::baseport $::portcount]
+ if {$::tls} {
+ set pport [find_available_port $::baseport $::portcount]
+ dict set config port $pport
+ dict set config "tls-port" $port
+ } else {
+ dict set config port $port
+ }
+ create_server_config_file $config_file $config $config_lines
+
+ # Truncate log so wait_server_started will not be looking at
+ # output of the failed server.
+ close [open $stdout "w"]
+
+ continue; # Try again
+ }
+
+ if {$::valgrind} {set retrynum 1000} else {set retrynum 100}
+ if {$code ne "undefined"} {
+ set serverisup [server_is_up $::host $port $retrynum]
+ } else {
+ set serverisup 1
+ }
+
+ if {$::verbose} {
+ puts ""
+ }
+
+ if {!$serverisup} {
+ set err {}
+ append err [exec cat $stdout] "\n" [exec cat $stderr]
+ start_server_error $config_file $err
+ return
+ }
+ set server_started 1
+ }
+
+ # setup properties to be able to initialize a client object
+ set port_param [expr $::tls ? {"tls-port"} : {"port"}]
+ set host $::host
+ if {[dict exists $config bind]} { set host [dict get $config bind] }
+ if {[dict exists $config $port_param]} { set port [dict get $config $port_param] }
+
+ # setup config dict
+ dict set srv "config_file" $config_file
+ dict set srv "config" $config
+ dict set srv "pid" $pid
+ dict set srv "host" $host
+ dict set srv "port" $port
+ dict set srv "stdout" $stdout
+ dict set srv "stderr" $stderr
+ dict set srv "unixsocket" $unixsocket
+ if {$::tls} {
+ dict set srv "pport" $pport
+ }
+
+ # if a block of code is supplied, we wait for the server to become
+ # available, create a client object and kill the server afterwards
+ if {$code ne "undefined"} {
+ set line [exec head -n1 $stdout]
+ if {[string match {*already in use*} $line]} {
+ error_and_quit $config_file $line
+ }
+
+ while 1 {
+ # check that the server actually started and is ready for connections
+ if {[count_message_lines $stdout "Ready to accept"] > $previous_ready_count} {
+ break
+ }
+ after 10
+ }
+
+ # append the server to the stack
+ lappend ::servers $srv
+
+ # connect client (after server dict is put on the stack)
+ reconnect
+
+ # remember previous num_failed to catch new errors
+ set prev_num_failed $::num_failed
+
+ # execute provided block
+ set num_tests $::num_tests
+ if {[catch { uplevel 1 $code } error]} {
+ set backtrace $::errorInfo
+ set assertion [string match "assertion:*" $error]
+
+ # fetch srv back from the server list, in case it was restarted by restart_server (new PID)
+ set srv [lindex $::servers end]
+
+ # pop the server object
+ set ::servers [lrange $::servers 0 end-1]
+
+ # Kill the server without checking for leaks
+ dict set srv "skipleaks" 1
+ kill_server $srv
+
+ if {$::dump_logs && $assertion} {
+ # if we caught an assertion ($::num_failed isn't incremented yet)
+ # this happens when the test spawns a server and not the other way around
+ dump_server_log $srv
+ } else {
+ # Print crash report from log
+ set crashlog [crashlog_from_file [dict get $srv "stdout"]]
+ if {[string length $crashlog] > 0} {
+ puts [format "\nLogged crash report (pid %d):" [dict get $srv "pid"]]
+ puts "$crashlog"
+ puts ""
+ }
+
+ set sanitizerlog [sanitizer_errors_from_file [dict get $srv "stderr"]]
+ if {[string length $sanitizerlog] > 0} {
+ puts [format "\nLogged sanitizer errors (pid %d):" [dict get $srv "pid"]]
+ puts "$sanitizerlog"
+ puts ""
+ }
+ }
+
+ if {!$assertion && $::durable} {
+ # durable is meant to prevent the whole tcl test from exiting on
+ # an exception. an assertion will be caught by the test proc.
+ set msg [string range $error 10 end]
+ lappend details $msg
+ lappend details $backtrace
+ lappend ::tests_failed $details
+
+ incr ::num_failed
+ send_data_packet $::test_server_fd err [join $details "\n"]
+ } else {
+ # Re-raise, let handler up the stack take care of this.
+ error $error $backtrace
+ }
+ } else {
+ if {$::dump_logs && $prev_num_failed != $::num_failed} {
+ dump_server_log $srv
+ }
+ }
+
+ # fetch srv back from the server list, in case it was restarted by restart_server (new PID)
+ set srv [lindex $::servers end]
+
+ # Don't do the leak check when no tests were run
+ if {$num_tests == $::num_tests} {
+ dict set srv "skipleaks" 1
+ }
+
+ # pop the server object
+ set ::servers [lrange $::servers 0 end-1]
+
+ set ::tags [lrange $::tags 0 end-[llength $tags]]
+ kill_server $srv
+ if {!$keep_persistence} {
+ clean_persistence $srv
+ }
+ set _ ""
+ } else {
+ set ::tags [lrange $::tags 0 end-[llength $tags]]
+ set _ $srv
+ }
+}
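+
+# Typical usage (an illustrative sketch; the tag, override and test body are
+# hypothetical):
+#
+#   start_server {tags {"example"} overrides {save {}}} {
+#       test "PING returns PONG" { r ping } {PONG}
+#   }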
+
+# Start multiple servers with the same options, run code, then stop them.
+proc start_multiple_servers {num options code} {
+ for {set i 0} {$i < $num} {incr i} {
+ set code [list start_server $options $code]
+ }
+ uplevel 1 $code
+}
+
+proc restart_server {level wait_ready rotate_logs {reconnect 1} {shutdown sigterm}} {
+ set srv [lindex $::servers end+$level]
+ if {$shutdown ne {sigterm}} {
+ catch {[dict get $srv "client"] shutdown $shutdown}
+ }
+ # Kill server doesn't mind if the server is already dead
+ kill_server $srv
+ # Remove the default client from the server
+ dict unset srv "client"
+
+ set pid [dict get $srv "pid"]
+ set stdout [dict get $srv "stdout"]
+ set stderr [dict get $srv "stderr"]
+ if {$rotate_logs} {
+ set ts [clock format [clock seconds] -format %y%m%d%H%M%S]
+ file rename $stdout $stdout.$ts.$pid
+ file rename $stderr $stderr.$ts.$pid
+ }
+ set prev_ready_count [count_message_lines $stdout "Ready to accept"]
+
+ # if we're inside a test, write the test name to the server log file
+ if {[info exists ::cur_test]} {
+ set fd [open $stdout "a+"]
+ puts $fd "### Restarting server for test $::cur_test"
+ close $fd
+ }
+
+ set config_file [dict get $srv "config_file"]
+
+ set pid [spawn_server $config_file $stdout $stderr {}]
+
+ # check that the server actually started
+ wait_server_started $config_file $stdout $pid
+
+ # update the pid in the servers list
+ dict set srv "pid" $pid
+ # re-set $srv in the servers list
+ lset ::servers end+$level $srv
+
+ if {$wait_ready} {
+ while 1 {
+ # check that the server actually started and is ready for connections
+ if {[count_message_lines $stdout "Ready to accept"] > $prev_ready_count} {
+ break
+ }
+ after 10
+ }
+ }
+ if {$reconnect} {
+ reconnect $level
+ }
+}
diff --git a/tests/support/test.tcl b/tests/support/test.tcl
new file mode 100644
index 0000000..b7cd38b
--- /dev/null
+++ b/tests/support/test.tcl
@@ -0,0 +1,267 @@
+set ::num_tests 0
+set ::num_passed 0
+set ::num_failed 0
+set ::num_skipped 0
+set ::num_aborted 0
+set ::tests_failed {}
+set ::cur_test ""
+
+proc fail {msg} {
+ error "assertion:$msg"
+}
+
+proc assert {condition} {
+ if {![uplevel 1 [list expr $condition]]} {
+ set context "(context: [info frame -1])"
+ error "assertion:Expected [uplevel 1 [list subst -nocommands $condition]] $context"
+ }
+}
+
+proc assert_no_match {pattern value} {
+ if {[string match $pattern $value]} {
+ set context "(context: [info frame -1])"
+ error "assertion:Expected '$value' to not match '$pattern' $context"
+ }
+}
+
+proc assert_match {pattern value {detail ""} {context ""}} {
+ if {![string match $pattern $value]} {
+ if {$context eq ""} {
+ set context "(context: [info frame -1])"
+ }
+ error "assertion:Expected '$value' to match '$pattern' $context $detail"
+ }
+}
+
+proc assert_failed {expected_err detail} {
+ if {$detail ne ""} {
+ set detail "(detail: $detail)"
+ } else {
+ set detail "(context: [info frame -2])"
+ }
+ error "assertion:$expected_err $detail"
+}
+
+proc assert_not_equal {value expected {detail ""}} {
+ if {!($expected ne $value)} {
+ assert_failed "Expected '$value' not equal to '$expected'" $detail
+ }
+}
+
+proc assert_equal {value expected {detail ""}} {
+ if {$expected ne $value} {
+ assert_failed "Expected '$value' to be equal to '$expected'" $detail
+ }
+}
+
+proc assert_lessthan {value expected {detail ""}} {
+ if {!($value < $expected)} {
+ assert_failed "Expected '$value' to be less than '$expected'" $detail
+ }
+}
+
+proc assert_lessthan_equal {value expected {detail ""}} {
+ if {!($value <= $expected)} {
+ assert_failed "Expected '$value' to be less than or equal to '$expected'" $detail
+ }
+}
+
+proc assert_morethan {value expected {detail ""}} {
+ if {!($value > $expected)} {
+ assert_failed "Expected '$value' to be more than '$expected'" $detail
+ }
+}
+
+proc assert_morethan_equal {value expected {detail ""}} {
+ if {!($value >= $expected)} {
+ assert_failed "Expected '$value' to be more than or equal to '$expected'" $detail
+ }
+}
+
+proc assert_range {value min max {detail ""}} {
+ if {!($value <= $max && $value >= $min)} {
+ assert_failed "Expected '$value' to be between '$min' and '$max'" $detail
+ }
+}
+
+proc assert_error {pattern code {detail ""}} {
+ if {[catch {uplevel 1 $code} error]} {
+ assert_match $pattern $error $detail
+ } else {
+ assert_failed "Expected an error matching '$pattern' but got '$error'" $detail
+ }
+}
+
+proc assert_encoding {enc key} {
+ if {$::ignoreencoding} {
+ return
+ }
+ set val [r object encoding $key]
+ assert_match $enc $val
+}
+
+proc assert_type {type key} {
+ assert_equal $type [r type $key]
+}
+
+proc assert_refcount {ref key} {
+ if {[lsearch $::denytags "needs:debug"] >= 0} {
+ return
+ }
+
+ set val [r object refcount $key]
+ assert_equal $ref $val
+}
+
+proc assert_refcount_morethan {key ref} {
+ if {[lsearch $::denytags "needs:debug"] >= 0} {
+ return
+ }
+
+ set val [r object refcount $key]
+ assert_morethan $val $ref
+}
+
+# Wait for the specified condition to be true, with the specified number of
+# max retries and delay between retries. Otherwise the 'elsescript' is
+# executed.
+proc wait_for_condition {maxtries delay e _else_ elsescript} {
+ while {[incr maxtries -1] >= 0} {
+ set errcode [catch {uplevel 1 [list expr $e]} result]
+ if {$errcode == 0} {
+ if {$result} break
+ } else {
+ return -code $errcode $result
+ }
+ after $delay
+ }
+ if {$maxtries == -1} {
+ set errcode [catch [uplevel 1 $elsescript] result]
+ return -code $errcode $result
+ }
+}
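+
+# For example (illustrative; this mirrors what wait_for_sync in util.tcl does):
+#
+#   wait_for_condition 50 100 {
+#       [status $r master_link_status] eq "up"
+#   } else {
+#       fail "replica didn't sync in time"
+#   }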
+
+# Try to match a value against a list of patterns that are either regexes
+# (starting with "/") or plain strings. The caller can instead request
+# glob-pattern matching only.
+proc search_pattern_list {value pattern_list {glob_pattern false}} {
+ foreach el $pattern_list {
+ if {[string length $el] == 0} { continue }
+ if { $glob_pattern } {
+ if {[string match $el $value]} {
+ return 1
+ }
+ continue
+ }
+ if {[string equal / [string index $el 0]] && [regexp -- [string range $el 1 end] $value]} {
+ return 1
+ } elseif {[string equal $el $value]} {
+ return 1
+ }
+ }
+ return 0
+}
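+
+# For example (illustrative): [search_pattern_list "foobar" {/^foo baz}]
+# returns 1 via the regex branch, and [search_pattern_list "baz" {/^foo baz}]
+# returns 1 via the plain-string branch.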
+
+proc test {name code {okpattern undefined} {tags {}}} {
+ # abort if test name in skiptests
+ if {[search_pattern_list $name $::skiptests]} {
+ incr ::num_skipped
+ send_data_packet $::test_server_fd skip $name
+ return
+ }
+ if {$::verbose > 1} {
+ puts "starting test $name"
+ }
+ # abort if only_tests was set but test name is not included
+ if {[llength $::only_tests] > 0 && ![search_pattern_list $name $::only_tests]} {
+ incr ::num_skipped
+ send_data_packet $::test_server_fd skip $name
+ return
+ }
+
+ set tags [concat $::tags $tags]
+ if {![tags_acceptable $tags err]} {
+ incr ::num_aborted
+ send_data_packet $::test_server_fd ignore "$name: $err"
+ return
+ }
+
+ incr ::num_tests
+ set details {}
+ lappend details "$name in $::curfile"
+
+ # set a cur_test global to be logged into new servers that are spawned,
+ # and log the test name in all existing servers
+ set prev_test $::cur_test
+ set ::cur_test "$name in $::curfile"
+ if {$::external} {
+ catch {
+ set r [redis [srv 0 host] [srv 0 port] 0 $::tls]
+ catch {
+ $r debug log "### Starting test $::cur_test"
+ }
+ $r close
+ }
+ } else {
+ set servers {}
+ foreach srv $::servers {
+ set stdout [dict get $srv stdout]
+ set fd [open $stdout "a+"]
+ puts $fd "### Starting test $::cur_test"
+ close $fd
+ lappend servers $stdout
+ }
+ if {$::verbose > 1} {
+ puts "### Starting test $::cur_test - with servers: $servers"
+ }
+ }
+
+ send_data_packet $::test_server_fd testing $name
+
+ set test_start_time [clock milliseconds]
+ if {[catch {set retval [uplevel 1 $code]} error]} {
+ set assertion [string match "assertion:*" $error]
+ if {$assertion || $::durable} {
+ # durable prevents the whole tcl test from exiting on an exception.
+ # an assertion is handled gracefully anyway.
+ set msg [string range $error 10 end]
+ lappend details $msg
+ if {!$assertion} {
+ lappend details $::errorInfo
+ }
+ lappend ::tests_failed $details
+
+ incr ::num_failed
+ send_data_packet $::test_server_fd err [join $details "\n"]
+
+ if {$::stop_on_failure} {
+ puts "Test error (last server port:[srv port], log:[srv stdout]), press enter to teardown the test."
+ flush stdout
+ gets stdin
+ }
+ } else {
+ # Re-raise, let handler up the stack take care of this.
+ error $error $::errorInfo
+ }
+ } else {
+ if {$okpattern eq "undefined" || $okpattern eq $retval || [string match $okpattern $retval]} {
+ incr ::num_passed
+ set elapsed [expr {[clock milliseconds]-$test_start_time}]
+ send_data_packet $::test_server_fd ok $name $elapsed
+ } else {
+ set msg "Expected '$okpattern' to equal or match '$retval'"
+ lappend details $msg
+ lappend ::tests_failed $details
+
+ incr ::num_failed
+ send_data_packet $::test_server_fd err [join $details "\n"]
+ }
+ }
+
+ if {$::traceleaks} {
+ set output [exec leaks redis-server]
+ if {![string match {*0 leaks*} $output]} {
+ send_data_packet $::test_server_fd err "Detected a memory leak in test '$name': $output"
+ }
+ }
+ set ::cur_test $prev_test
+}
diff --git a/tests/support/tmpfile.tcl b/tests/support/tmpfile.tcl
new file mode 100644
index 0000000..809f587
--- /dev/null
+++ b/tests/support/tmpfile.tcl
@@ -0,0 +1,15 @@
+set ::tmpcounter 0
+set ::tmproot "./tests/tmp"
+file mkdir $::tmproot
+
+# returns a dirname unique to this process to write to
+proc tmpdir {basename} {
+ set dir [file join $::tmproot $basename.[pid].[incr ::tmpcounter]]
+ file mkdir $dir
+ set _ $dir
+}
+
+# return a filename unique to this process to write to
+proc tmpfile {basename} {
+ file join $::tmproot $basename.[pid].[incr ::tmpcounter]
+}
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
new file mode 100644
index 0000000..8941d1a
--- /dev/null
+++ b/tests/support/util.tcl
@@ -0,0 +1,1117 @@
+proc randstring {min max {type binary}} {
+ set len [expr {$min+int(rand()*($max-$min+1))}]
+ set output {}
+ if {$type eq {binary}} {
+ set minval 0
+ set maxval 255
+ } elseif {$type eq {alpha} || $type eq {simplealpha}} {
+ set minval 48
+ set maxval 122
+ } elseif {$type eq {compr}} {
+ set minval 48
+ set maxval 52
+ }
+ while {$len} {
+ set num [expr {$minval+int(rand()*($maxval-$minval+1))}]
+ set rr [format "%c" $num]
+ if {$type eq {simplealpha} && ![string is alnum $rr]} {continue}
+ if {$type eq {alpha} && $num eq 92} {continue} ;# avoid putting '\' char in the string, it can mess up TCL processing
+ append output $rr
+ incr len -1
+ }
+ return $output
+}
+
+# Useful for some tests
+proc zlistAlikeSort {a b} {
+ if {[lindex $a 0] > [lindex $b 0]} {return 1}
+ if {[lindex $a 0] < [lindex $b 0]} {return -1}
+ string compare [lindex $a 1] [lindex $b 1]
+}
+
+# Return all log lines starting with the first line that contains a warning.
+# Generally, this will be an assertion error with a stack trace.
+proc crashlog_from_file {filename} {
+ set lines [split [exec cat $filename] "\n"]
+ set matched 0
+ set logall 0
+ set result {}
+ foreach line $lines {
+ if {[string match {*REDIS BUG REPORT START*} $line]} {
+ set logall 1
+ }
+ if {[regexp {^\[\d+\]\s+\d+\s+\w+\s+\d{2}:\d{2}:\d{2} \#} $line]} {
+ set matched 1
+ }
+ if {$logall || $matched} {
+ lappend result $line
+ }
+ }
+ join $result "\n"
+}
+
+# Return sanitizer log lines
+proc sanitizer_errors_from_file {filename} {
+ set log [exec cat $filename]
+ set lines [split $log "\n"]
+
+ foreach line $lines {
+ # Ignore huge allocation warnings
+ if {[string match {*WARNING: AddressSanitizer failed to allocate*} $line]} {
+ continue
+ }
+
+ # GCC UBSAN output does not contain 'Sanitizer' but 'runtime error'.
+ if {[string match {*runtime error*} $line] ||
+ [string match {*Sanitizer*} $line]} {
+ return $log
+ }
+ }
+
+ return ""
+}
+
+proc getInfoProperty {infostr property} {
+ if {[regexp -lineanchor "^$property:(.*?)\r\n" $infostr _ value]} {
+ return $value
+ }
+}
+
+# Return value for INFO property
+proc status {r property} {
+ set _ [getInfoProperty [{*}$r info] $property]
+}
+
+proc waitForBgsave r {
+ while 1 {
+ if {[status $r rdb_bgsave_in_progress] eq 1} {
+ if {$::verbose} {
+ puts -nonewline "\nWaiting for background save to finish... "
+ flush stdout
+ }
+ after 50
+ } else {
+ break
+ }
+ }
+}
+
+proc waitForBgrewriteaof r {
+ while 1 {
+ if {[status $r aof_rewrite_in_progress] eq 1} {
+ if {$::verbose} {
+ puts -nonewline "\nWaiting for background AOF rewrite to finish... "
+ flush stdout
+ }
+ after 50
+ } else {
+ break
+ }
+ }
+}
+
+proc wait_for_sync r {
+ wait_for_condition 50 100 {
+ [status $r master_link_status] eq "up"
+ } else {
+ fail "replica didn't sync in time"
+ }
+}
+
+proc wait_replica_online r {
+ wait_for_condition 50 100 {
+ [string match "*slave0:*,state=online*" [$r info replication]]
+ } else {
+ fail "replica didn't come online in time"
+ }
+}
+
+proc wait_for_ofs_sync {r1 r2} {
+ wait_for_condition 50 100 {
+ [status $r1 master_repl_offset] eq [status $r2 master_repl_offset]
+ } else {
+ fail "replica offset didn't match in time"
+ }
+}
+
+proc wait_done_loading r {
+ wait_for_condition 50 100 {
+ [catch {$r ping} e] == 0
+ } else {
+ fail "Loading DB is taking too much time."
+ }
+}
+
+proc wait_lazyfree_done r {
+ wait_for_condition 50 100 {
+ [status $r lazyfree_pending_objects] == 0
+ } else {
+ fail "lazyfree isn't done"
+ }
+}
+
+# count current log lines in server's stdout
+proc count_log_lines {srv_idx} {
+ set _ [string trim [exec wc -l < [srv $srv_idx stdout]]]
+}
+
+# returns the number of times a line with that pattern appears in a file
+proc count_message_lines {file pattern} {
+ set res 0
+ # exec fails when grep exits with a status other than 0 (i.e. when the pattern wasn't found)
+ catch {
+ set res [string trim [exec grep $pattern $file 2> /dev/null | wc -l]]
+ }
+ return $res
+}
+
+# returns the number of times a line with that pattern appears in the log
+proc count_log_message {srv_idx pattern} {
+ set stdout [srv $srv_idx stdout]
+ return [count_message_lines $stdout $pattern]
+}
+
+# verify pattern exists in server's stdout after a certain line number
+proc verify_log_message {srv_idx pattern from_line} {
+ incr from_line
+ set result [exec tail -n +$from_line < [srv $srv_idx stdout]]
+ if {![string match $pattern $result]} {
+ error "assertion:expected message not found in log file: $pattern"
+ }
+}
+
+# wait for pattern to be found in server's stdout after certain line number
+# return value is a list containing the line that matched the pattern and the line number
+proc wait_for_log_messages {srv_idx patterns from_line maxtries delay} {
+ set retry $maxtries
+ set next_line [expr $from_line + 1] ;# searching from the line after
+ set stdout [srv $srv_idx stdout]
+ while {$retry} {
+ # re-read the last line (unless it's before our first); last time we read it, it might have been incomplete
+ set next_line [expr $next_line - 1 > $from_line + 1 ? $next_line - 1 : $from_line + 1]
+ set result [exec tail -n +$next_line < $stdout]
+ set result [split $result "\n"]
+ foreach line $result {
+ foreach pattern $patterns {
+ if {[string match $pattern $line]} {
+ return [list $line $next_line]
+ }
+ }
+ incr next_line
+ }
+ incr retry -1
+ after $delay
+ }
+ if {$retry == 0} {
+ if {$::verbose} {
+ puts "content of $stdout from line: $from_line:"
+ puts [exec tail -n +$from_line < $stdout]
+ }
+ fail "log message of '$patterns' not found in $stdout after line: $from_line till line: [expr $next_line -1]"
+ }
+}
+
+# write line to server log file
+proc write_log_line {srv_idx msg} {
+ set logfile [srv $srv_idx stdout]
+ set fd [open $logfile "a+"]
+ puts $fd "### $msg"
+ close $fd
+}
+
+# Random integer between 0 (included) and max (excluded).
+proc randomInt {max} {
+ expr {int(rand()*$max)}
+}
+
+# Random integer between min (included) and max (excluded).
+proc randomRange {min max} {
+ expr {int(rand()*[expr $max - $min]) + $min}
+}
+
+# Random signed integer between -max and max (both extremes excluded).
+proc randomSignedInt {max} {
+ set i [randomInt $max]
+ if {rand() > 0.5} {
+ set i -$i
+ }
+ return $i
+}
+
+proc randpath args {
+ set path [expr {int(rand()*[llength $args])}]
+ uplevel 1 [lindex $args $path]
+}
+
+proc randomValue {} {
+ randpath {
+ # Small enough to likely collide
+ randomSignedInt 1000
+ } {
+ # 32 bit compressible signed/unsigned
+ randpath {randomSignedInt 2000000000} {randomSignedInt 4000000000}
+ } {
+ # 64 bit
+ randpath {randomSignedInt 1000000000000}
+ } {
+ # Random string
+ randpath {randstring 0 256 alpha} \
+ {randstring 0 256 compr} \
+ {randstring 0 256 binary}
+ }
+}
+
+proc randomKey {} {
+ randpath {
+ # Small enough to likely collide
+ randomInt 1000
+ } {
+ # 32 bit compressible signed/unsigned
+ randpath {randomInt 2000000000} {randomInt 4000000000}
+ } {
+ # 64 bit
+ randpath {randomInt 1000000000000}
+ } {
+ # Random string
+ randpath {randstring 1 256 alpha} \
+ {randstring 1 256 compr}
+ }
+}
+
+proc findKeyWithType {r type} {
+ for {set j 0} {$j < 20} {incr j} {
+ set k [{*}$r randomkey]
+ if {$k eq {}} {
+ return {}
+ }
+ if {[{*}$r type $k] eq $type} {
+ return $k
+ }
+ }
+ return {}
+}
+
+proc createComplexDataset {r ops {opt {}}} {
+ set useexpire [expr {[lsearch -exact $opt useexpire] != -1}]
+ if {[lsearch -exact $opt usetag] != -1} {
+ set tag "{t}"
+ } else {
+ set tag ""
+ }
+ for {set j 0} {$j < $ops} {incr j} {
+ set k [randomKey]$tag
+ set k2 [randomKey]$tag
+ set f [randomValue]
+ set v [randomValue]
+
+ if {$useexpire} {
+ if {rand() < 0.1} {
+ {*}$r expire [randomKey] [randomInt 2]
+ }
+ }
+
+ randpath {
+ set d [expr {rand()}]
+ } {
+ set d [expr {rand()}]
+ } {
+ set d [expr {rand()}]
+ } {
+ set d [expr {rand()}]
+ } {
+ set d [expr {rand()}]
+ } {
+ randpath {set d +inf} {set d -inf}
+ }
+ set t [{*}$r type $k]
+
+ if {$t eq {none}} {
+ randpath {
+ {*}$r set $k $v
+ } {
+ {*}$r lpush $k $v
+ } {
+ {*}$r sadd $k $v
+ } {
+ {*}$r zadd $k $d $v
+ } {
+ {*}$r hset $k $f $v
+ } {
+ {*}$r del $k
+ }
+ set t [{*}$r type $k]
+ }
+
+ switch $t {
+ {string} {
+ # Nothing to do
+ }
+ {list} {
+ randpath {{*}$r lpush $k $v} \
+ {{*}$r rpush $k $v} \
+ {{*}$r lrem $k 0 $v} \
+ {{*}$r rpop $k} \
+ {{*}$r lpop $k}
+ }
+ {set} {
+ randpath {{*}$r sadd $k $v} \
+ {{*}$r srem $k $v} \
+ {
+ set otherset [findKeyWithType {*}$r set]
+ if {$otherset ne {}} {
+ randpath {
+ {*}$r sunionstore $k2 $k $otherset
+ } {
+ {*}$r sinterstore $k2 $k $otherset
+ } {
+ {*}$r sdiffstore $k2 $k $otherset
+ }
+ }
+ }
+ }
+ {zset} {
+ randpath {{*}$r zadd $k $d $v} \
+ {{*}$r zrem $k $v} \
+ {
+ set otherzset [findKeyWithType {*}$r zset]
+ if {$otherzset ne {}} {
+ randpath {
+ {*}$r zunionstore $k2 2 $k $otherzset
+ } {
+ {*}$r zinterstore $k2 2 $k $otherzset
+ }
+ }
+ }
+ }
+ {hash} {
+ randpath {{*}$r hset $k $f $v} \
+ {{*}$r hdel $k $f}
+ }
+ }
+ }
+}
+
+proc formatCommand {args} {
+ set cmd "*[llength $args]\r\n"
+ foreach a $args {
+ append cmd "$[string length $a]\r\n$a\r\n"
+ }
+ set _ $cmd
+}
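+
+# For example (illustrative), [formatCommand SET mykey myvalue] returns the
+# RESP encoding of the command:
+#
+#     *3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$7\r\nmyvalue\r\n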
+
+proc csvdump r {
+ set o {}
+ if {$::singledb} {
+ set maxdb 1
+ } else {
+ set maxdb 16
+ }
+ for {set db 0} {$db < $maxdb} {incr db} {
+ if {!$::singledb} {
+ {*}$r select $db
+ }
+ foreach k [lsort [{*}$r keys *]] {
+ set type [{*}$r type $k]
+ append o [csvstring $db] , [csvstring $k] , [csvstring $type] ,
+ switch $type {
+ string {
+ append o [csvstring [{*}$r get $k]] "\n"
+ }
+ list {
+ foreach e [{*}$r lrange $k 0 -1] {
+ append o [csvstring $e] ,
+ }
+ append o "\n"
+ }
+ set {
+ foreach e [lsort [{*}$r smembers $k]] {
+ append o [csvstring $e] ,
+ }
+ append o "\n"
+ }
+ zset {
+ foreach e [{*}$r zrange $k 0 -1 withscores] {
+ append o [csvstring $e] ,
+ }
+ append o "\n"
+ }
+ hash {
+ set fields [{*}$r hgetall $k]
+ set newfields {}
+ foreach {k v} $fields {
+ lappend newfields [list $k $v]
+ }
+ set fields [lsort -index 0 $newfields]
+ foreach kv $fields {
+ append o [csvstring [lindex $kv 0]] ,
+ append o [csvstring [lindex $kv 1]] ,
+ }
+ append o "\n"
+ }
+ }
+ }
+ }
+ if {!$::singledb} {
+ {*}$r select 9
+ }
+ return $o
+}
+
+proc csvstring s {
+ return "\"$s\""
+}
+
+proc roundFloat f {
+ format "%.10g" $f
+}
+
+set ::last_port_attempted 0
+proc find_available_port {start count} {
+ set port [expr $::last_port_attempted + 1]
+ for {set attempts 0} {$attempts < $count} {incr attempts} {
+ if {$port < $start || $port >= $start+$count} {
+ set port $start
+ }
+ set fd1 -1
+ if {[catch {set fd1 [socket -server 127.0.0.1 $port]}] ||
+ [catch {set fd2 [socket -server 127.0.0.1 [expr $port+10000]]}]} {
+ if {$fd1 != -1} {
+ close $fd1
+ }
+ } else {
+ close $fd1
+ close $fd2
+ set ::last_port_attempted $port
+ return $port
+ }
+ incr port
+ }
+ error "Can't find a non busy port in the $start-[expr {$start+$count-1}] range."
+}
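+
+# Note that the proc probes both $port and $port+10000, so the cluster bus
+# port that a server spawned on $port would use is known to be free as well.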
+
+# Test if TERM looks like it supports colors
+proc color_term {} {
+ expr {[info exists ::env(TERM)] && [string match *xterm* $::env(TERM)]}
+}
+
+proc colorstr {color str} {
+ if {[color_term]} {
+ set b 0
+ if {[string range $color 0 4] eq {bold-}} {
+ set b 1
+ set color [string range $color 5 end]
+ }
+ switch $color {
+ red {set colorcode {31}}
+ green {set colorcode {32}}
+ yellow {set colorcode {33}}
+ blue {set colorcode {34}}
+ magenta {set colorcode {35}}
+ cyan {set colorcode {36}}
+ white {set colorcode {37}}
+ default {set colorcode {37}}
+ }
+ if {$colorcode ne {}} {
+ return "\033\[$b;${colorcode};49m$str\033\[0m"
+ }
+ } else {
+ return $str
+ }
+}
+
+proc find_valgrind_errors {stderr on_termination} {
+ set fd [open $stderr]
+ set buf [read $fd]
+ close $fd
+
+ # Look for stack trace (" at 0x") and other errors (Invalid, Mismatched, etc).
+    # Look for "Warning" lines, but ignore the "set address range perms" ones:
+    # those show up in a few units (e.g. corrupt-dump) and don't indicate any real concern.
+ if {[regexp -- { at 0x} $buf] ||
+ [regexp -- {^(?=.*Warning)(?:(?!set address range perms).)*$} $buf] ||
+ [regexp -- {Invalid} $buf] ||
+ [regexp -- {Mismatched} $buf] ||
+ [regexp -- {uninitialized} $buf] ||
+ [regexp -- {has a fishy} $buf] ||
+ [regexp -- {overlap} $buf]} {
+ return $buf
+ }
+
+ # If the process didn't terminate yet, we can't look for the summary report
+ if {!$on_termination} {
+ return ""
+ }
+
+    # Look for the absence of a leak-free summary (happens when redis isn't terminated properly).
+ if {(![regexp -- {definitely lost: 0 bytes} $buf] &&
+ ![regexp -- {no leaks are possible} $buf])} {
+ return $buf
+ }
+
+ return ""
+}
+
+# Execute a background process writing random data for the specified number
+# of seconds to the specified Redis instance.
+proc start_write_load {host port seconds} {
+ set tclsh [info nameofexecutable]
+ exec $tclsh tests/helpers/gen_write_load.tcl $host $port $seconds $::tls &
+}
+
+# Stop a process generating write load executed with start_write_load.
+proc stop_write_load {handle} {
+ catch {exec /bin/kill -9 $handle}
+}
+
+proc wait_load_handlers_disconnected {{level 0}} {
+ wait_for_condition 50 100 {
+ ![string match {*name=LOAD_HANDLER*} [r $level client list]]
+ } else {
+ fail "load_handler(s) still connected after too long time."
+ }
+}
+
+proc K { x y } { set x }
+
+# Shuffle a list with the Fisher-Yates algorithm.
+proc lshuffle {list} {
+ set n [llength $list]
+ while {$n>1} {
+ set j [expr {int(rand()*$n)}]
+ incr n -1
+ if {$n==$j} continue
+ set v [lindex $list $j]
+ lset list $j [lindex $list $n]
+ lset list $n $v
+ }
+ return $list
+}
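+
+# Illustrative usage: every permutation of the input is equally likely.
+#
+#     lshuffle {1 2 3 4 5}   ;# might return {3 1 5 2 4}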
+
+# Execute a background process writing complex data for the specified number
+# of ops to the specified Redis instance.
+proc start_bg_complex_data {host port db ops} {
+ set tclsh [info nameofexecutable]
+ exec $tclsh tests/helpers/bg_complex_data.tcl $host $port $db $ops $::tls &
+}
+
+# Stop a process generating complex data executed with start_bg_complex_data.
+proc stop_bg_complex_data {handle} {
+ catch {exec /bin/kill -9 $handle}
+}
+
+# Write num keys with the given key prefix and value size (in bytes). If idx is
+# given, it's the index (AKA level) used with the srv procedure and it specifies
+# to which Redis instance to write the keys.
+proc populate {num {prefix key:} {size 3} {idx 0} {prints false}} {
+ r $idx deferred 1
+ if {$num > 16} {set pipeline 16} else {set pipeline $num}
+ set val [string repeat A $size]
+ for {set j 0} {$j < $pipeline} {incr j} {
+ r $idx set $prefix$j $val
+ if {$prints} {puts $j}
+ }
+ for {} {$j < $num} {incr j} {
+ r $idx set $prefix$j $val
+ r $idx read
+ if {$prints} {puts $j}
+ }
+ for {set j 0} {$j < $pipeline} {incr j} {
+ r $idx read
+ if {$prints} {puts $j}
+ }
+ r $idx deferred 0
+}
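+
+# Illustrative usage (hypothetical prefix): write 1000 keys named bench:0 ..
+# bench:999, each holding a 16-byte value, to the innermost server:
+#
+#     populate 1000 bench: 16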
+
+proc get_child_pid {idx} {
+ set pid [srv $idx pid]
+ if {[file exists "/usr/bin/pgrep"]} {
+ set fd [open "|pgrep -P $pid" "r"]
+ set child_pid [string trim [lindex [split [read $fd] \n] 0]]
+ } else {
+ set fd [open "|ps --ppid $pid -o pid" "r"]
+ set child_pid [string trim [lindex [split [read $fd] \n] 1]]
+ }
+ close $fd
+
+ return $child_pid
+}
+
+proc process_is_alive pid {
+ if {[catch {exec ps -p $pid -f} err]} {
+ return 0
+ } else {
+ if {[string match "*<defunct>*" $err]} { return 0 }
+ return 1
+ }
+}
+
+proc pause_process pid {
+ exec kill -SIGSTOP $pid
+ wait_for_condition 50 100 {
+ [string match {*T*} [lindex [exec ps j $pid] 16]]
+ } else {
+ puts [exec ps j $pid]
+ fail "process didn't stop"
+ }
+}
+
+proc resume_process pid {
+ exec kill -SIGCONT $pid
+}
+
+proc cmdrstat {cmd r} {
+ if {[regexp "\r\ncmdstat_$cmd:(.*?)\r\n" [$r info commandstats] _ value]} {
+ set _ $value
+ }
+}
+
+proc errorrstat {cmd r} {
+ if {[regexp "\r\nerrorstat_$cmd:(.*?)\r\n" [$r info errorstats] _ value]} {
+ set _ $value
+ }
+}
+
+proc latencyrstat_percentiles {cmd r} {
+ if {[regexp "\r\nlatency_percentiles_usec_$cmd:(.*?)\r\n" [$r info latencystats] _ value]} {
+ set _ $value
+ }
+}
+
+proc generate_fuzzy_traffic_on_key {key duration} {
+ # Commands per type, blocking commands removed
+ # TODO: extract these from COMMAND DOCS, and improve to include other types
+ set string_commands {APPEND BITCOUNT BITFIELD BITOP BITPOS DECR DECRBY GET GETBIT GETRANGE GETSET INCR INCRBY INCRBYFLOAT MGET MSET MSETNX PSETEX SET SETBIT SETEX SETNX SETRANGE LCS STRLEN}
+ set hash_commands {HDEL HEXISTS HGET HGETALL HINCRBY HINCRBYFLOAT HKEYS HLEN HMGET HMSET HSCAN HSET HSETNX HSTRLEN HVALS HRANDFIELD}
+ set zset_commands {ZADD ZCARD ZCOUNT ZINCRBY ZINTERSTORE ZLEXCOUNT ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYLEX ZRANGEBYSCORE ZRANK ZREM ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE ZREVRANGE ZREVRANGEBYLEX ZREVRANGEBYSCORE ZREVRANK ZSCAN ZSCORE ZUNIONSTORE ZRANDMEMBER}
+ set list_commands {LINDEX LINSERT LLEN LPOP LPOS LPUSH LPUSHX LRANGE LREM LSET LTRIM RPOP RPOPLPUSH RPUSH RPUSHX}
+ set set_commands {SADD SCARD SDIFF SDIFFSTORE SINTER SINTERSTORE SISMEMBER SMEMBERS SMOVE SPOP SRANDMEMBER SREM SSCAN SUNION SUNIONSTORE}
+ set stream_commands {XACK XADD XCLAIM XDEL XGROUP XINFO XLEN XPENDING XRANGE XREAD XREADGROUP XREVRANGE XTRIM}
+ set commands [dict create string $string_commands hash $hash_commands zset $zset_commands list $list_commands set $set_commands stream $stream_commands]
+
+ set type [r type $key]
+ set cmds [dict get $commands $type]
+ set start_time [clock seconds]
+ set sent {}
+ set succeeded 0
+ while {([clock seconds]-$start_time) < $duration} {
+ # find a random command for our key type
+ set cmd_idx [expr {int(rand()*[llength $cmds])}]
+ set cmd [lindex $cmds $cmd_idx]
+ # get the command details from redis
+ if { [ catch {
+ set cmd_info [lindex [r command info $cmd] 0]
+ } err ] } {
+ # if we failed, it means redis crashed after the previous command
+ return $sent
+ }
+ # try to build a valid command argument
+ set arity [lindex $cmd_info 1]
+ set arity [expr $arity < 0 ? - $arity: $arity]
+ set firstkey [lindex $cmd_info 3]
+ set lastkey [lindex $cmd_info 4]
+ set i 1
+ if {$cmd == "XINFO"} {
+ lappend cmd "STREAM"
+ lappend cmd $key
+ lappend cmd "FULL"
+ incr i 3
+ }
+ if {$cmd == "XREAD"} {
+ lappend cmd "STREAMS"
+ lappend cmd $key
+ randpath {
+ lappend cmd \$
+ } {
+ lappend cmd [randomValue]
+ }
+ incr i 3
+ }
+ if {$cmd == "XADD"} {
+ lappend cmd $key
+ randpath {
+ lappend cmd "*"
+ } {
+ lappend cmd [randomValue]
+ }
+ lappend cmd [randomValue]
+ lappend cmd [randomValue]
+ incr i 4
+ }
+ for {} {$i < $arity} {incr i} {
+ if {$i == $firstkey || $i == $lastkey} {
+ lappend cmd $key
+ } else {
+ lappend cmd [randomValue]
+ }
+ }
+ # execute the command, we expect commands to fail on syntax errors
+ lappend sent $cmd
+ if { ! [ catch {
+ r {*}$cmd
+ } err ] } {
+ incr succeeded
+ } else {
+ set err [format "%s" $err] ;# convert to string for pattern matching
+ if {[string match "*SIGTERM*" $err]} {
+ puts "commands caused test to hang:"
+ foreach cmd $sent {
+ foreach arg $cmd {
+ puts -nonewline "[string2printable $arg] "
+ }
+ puts ""
+ }
+ # Re-raise, let handler up the stack take care of this.
+ error $err $::errorInfo
+ }
+ }
+ }
+
+ # print stats so that we know if we managed to generate commands that actually made sense
+ #if {$::verbose} {
+ # set count [llength $sent]
+ # puts "Fuzzy traffic sent: $count, succeeded: $succeeded"
+ #}
+
+ # return the list of commands we sent
+ return $sent
+}
+
+proc string2printable s {
+ set res {}
+ set has_special_chars false
+ foreach i [split $s {}] {
+ scan $i %c int
+        # flag characters that need escaping: control characters, anything
+        # above 'z' (which covers { and } and non-ASCII), plus: " $ \
+        if {$int < 32 || $int > 122 || $int == 34 || $int == 36 || $int == 92} {
+ set has_special_chars true
+ }
+ # TCL8.5 has issues mixing \x notation and normal chars in the same
+ # source code string, so we'll convert the entire string.
+ append res \\x[format %02X $int]
+ }
+ if {!$has_special_chars} {
+ return $s
+ }
+ set res "\"$res\""
+ return $res
+}
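+
+# For example (illustrative): [string2printable "a b"] returns the input
+# unchanged, while an input containing a control character such as "a\x01b"
+# comes back fully hex-escaped and quoted: "\x61\x01\x62".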
+
+# Compute the chi-square statistic of a list of random samples. With this
+# value we can estimate the confidence that the samples are uniformly
+# distributed.
+# Based on the following wiki:
+# https://en.wikipedia.org/wiki/Chi-square_distribution
+#
+# param res Random sample list
+# return Chi-square statistic of the sample
+#
+# x2_value: return value of the chi_square_value function
+# df: degrees of freedom, i.e. the number of distinct values minus 1
+#
+# By looking up x2_value and df in a chi-square distribution table,
+# we can tell the confidence level of the random sample.
+proc chi_square_value {res} {
+ unset -nocomplain mydict
+ foreach key $res {
+ dict incr mydict $key 1
+ }
+
+ set x2_value 0
+    # Expected count per distinct value; use floating point division to
+    # avoid integer truncation.
+    set p [expr {double([llength $res]) / [dict size $mydict]}]
+ foreach key [dict keys $mydict] {
+ set value [dict get $mydict $key]
+
+ # Aggregate the chi-square value of each element
+ set v [expr {pow($value - $p, 2) / $p}]
+ set x2_value [expr {$x2_value + $v}]
+ }
+
+ return $x2_value
+}
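+
+# Worked example (illustrative): with 100 samples over 10 distinct values the
+# expected count per value is p = 10. If one value occurs 13 times, another 7
+# times and the rest exactly 10 times, then
+# x2 = (13-10)^2/10 + (7-10)^2/10 = 1.8, to be checked against the table
+# with df = 9 degrees of freedom.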
+
+# Consume the confirmation messages sent in reply to Pub/Sub
+# [p|s]subscribe/unsubscribe commands.
+proc consume_subscribe_messages {client type channels} {
+ set numsub -1
+ set counts {}
+
+ for {set i [llength $channels]} {$i > 0} {incr i -1} {
+ set msg [$client read]
+ assert_equal $type [lindex $msg 0]
+
+ # when receiving subscribe messages the channels names
+ # are ordered. when receiving unsubscribe messages
+ # they are unordered
+ set idx [lsearch -exact $channels [lindex $msg 1]]
+ if {[string match "*unsubscribe" $type]} {
+ assert {$idx >= 0}
+ } else {
+ assert {$idx == 0}
+ }
+ set channels [lreplace $channels $idx $idx]
+
+ # aggregate the subscription count to return to the caller
+ lappend counts [lindex $msg 2]
+ }
+
+ # we should have received messages for channels
+ assert {[llength $channels] == 0}
+ return $counts
+}
+
+proc subscribe {client channels} {
+ $client subscribe {*}$channels
+ consume_subscribe_messages $client subscribe $channels
+}
+
+proc ssubscribe {client channels} {
+ $client ssubscribe {*}$channels
+ consume_subscribe_messages $client ssubscribe $channels
+}
+
+proc unsubscribe {client {channels {}}} {
+ $client unsubscribe {*}$channels
+ consume_subscribe_messages $client unsubscribe $channels
+}
+
+proc sunsubscribe {client {channels {}}} {
+ $client sunsubscribe {*}$channels
+ consume_subscribe_messages $client sunsubscribe $channels
+}
+
+proc psubscribe {client channels} {
+ $client psubscribe {*}$channels
+ consume_subscribe_messages $client psubscribe $channels
+}
+
+proc punsubscribe {client {channels {}}} {
+ $client punsubscribe {*}$channels
+ consume_subscribe_messages $client punsubscribe $channels
+}
+
+proc debug_digest_value {key} {
+ if {[lsearch $::denytags "needs:debug"] >= 0 || $::ignoredigest} {
+ return "dummy-digest-value"
+ }
+ r debug digest-value $key
+}
+
+proc debug_digest {{level 0}} {
+ if {[lsearch $::denytags "needs:debug"] >= 0 || $::ignoredigest} {
+ return "dummy-digest"
+ }
+ r $level debug digest
+}
+
+proc wait_for_blocked_client {{idx 0}} {
+ wait_for_condition 50 100 {
+ [s $idx blocked_clients] ne 0
+ } else {
+ fail "no blocked clients"
+ }
+}
+
+proc wait_for_blocked_clients_count {count {maxtries 100} {delay 10} {idx 0}} {
+ wait_for_condition $maxtries $delay {
+ [s $idx blocked_clients] == $count
+ } else {
+ fail "Timeout waiting for blocked clients"
+ }
+}
+
+proc read_from_aof {fp} {
+ # Input fp is a blocking binary file descriptor of an opened AOF file.
+ if {[gets $fp count] == -1} return ""
+ set count [string range $count 1 end]
+
+ # Return a list of arguments for the command.
+ set res {}
+ for {set j 0} {$j < $count} {incr j} {
+ read $fp 1
+ set arg [::redis::redis_bulk_read $fp]
+ if {$j == 0} {set arg [string tolower $arg]}
+ lappend res $arg
+ }
+ return $res
+}
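+
+# Illustrative AOF content: commands are stored in RESP format, so a
+# "SET foo bar" entry looks like this on disk and is returned by
+# read_from_aof as the list {set foo bar}:
+#
+#     *3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n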
+
+proc assert_aof_content {aof_path patterns} {
+ set fp [open $aof_path r]
+ fconfigure $fp -translation binary
+ fconfigure $fp -blocking 1
+
+ for {set j 0} {$j < [llength $patterns]} {incr j} {
+ assert_match [lindex $patterns $j] [read_from_aof $fp]
+ }
+}
+
+proc config_set {param value {options {}}} {
+ set mayfail 0
+ foreach option $options {
+ switch $option {
+ "mayfail" {
+ set mayfail 1
+ }
+ default {
+ error "Unknown option $option"
+ }
+ }
+ }
+
+ if {[catch {r config set $param $value} err]} {
+ if {!$mayfail} {
+ error $err
+ } else {
+ if {$::verbose} {
+ puts "Ignoring CONFIG SET $param $value failure: $err"
+ }
+ }
+ }
+}
+
+proc config_get_set {param value {options {}}} {
+ set config [lindex [r config get $param] 1]
+ config_set $param $value $options
+ return $config
+}
+
+proc delete_lines_with_pattern {filename tmpfilename pattern} {
+ set fh_in [open $filename r]
+ set fh_out [open $tmpfilename w]
+ while {[gets $fh_in line] != -1} {
+ if {![regexp $pattern $line]} {
+ puts $fh_out $line
+ }
+ }
+ close $fh_in
+ close $fh_out
+ file rename -force $tmpfilename $filename
+}
+
+proc get_nonloopback_addr {} {
+ set addrlist [list {}]
+ catch { set addrlist [exec hostname -I] }
+ return [lindex $addrlist 0]
+}
+
+proc get_nonloopback_client {} {
+ return [redis [get_nonloopback_addr] [srv 0 "port"] 0 $::tls]
+}
+
+# The following functions and variables are used only when running large-memory
+# tests. We avoid defining them when not running large-memory tests because the
+# global variables take up lots of memory.
+proc init_large_mem_vars {} {
+ if {![info exists ::str500]} {
+ set ::str500 [string repeat x 500000000] ;# 500mb
+ set ::str500_len [string length $::str500]
+ }
+}
+
+# Utility function to write a big argument into the redis client connection
+proc write_big_bulk {size {prefix ""} {skip_read no}} {
+ init_large_mem_vars
+
+    assert {[string length $prefix] <= $size}
+ r write "\$$size\r\n"
+ r write $prefix
+ incr size -[string length $prefix]
+ while {$size >= 500000000} {
+ r write $::str500
+ incr size -500000000
+ }
+ if {$size > 0} {
+ r write [string repeat x $size]
+ }
+ r write "\r\n"
+ if {!$skip_read} {
+ r flush
+ r read
+ }
+}
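+
+# Illustrative usage (hypothetical key): stream a 1gb SET value without
+# building it as a single Tcl string first. The command header is written
+# manually, then write_big_bulk streams the bulk argument and reads the
+# reply:
+#
+#     r write "*3\r\n\$3\r\nSET\r\n\$6\r\nbigkey\r\n"
+#     write_big_bulk 1000000000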
+
+# Utility to read big bulk response (work around Tcl limitations)
+proc read_big_bulk {code {compare no} {prefix ""}} {
+ init_large_mem_vars
+
+ r readraw 1
+ set resp_len [uplevel 1 $code] ;# get the first line of the RESP response
+ assert_equal [string range $resp_len 0 0] "$"
+ set resp_len [string range $resp_len 1 end]
+ set prefix_len [string length $prefix]
+ if {$compare} {
+ assert {$prefix_len <= $resp_len}
+ assert {$prefix_len <= $::str500_len}
+ }
+
+ set remaining $resp_len
+ while {$remaining > 0} {
+ set l $remaining
+        if {$l > $::str500_len} {set l $::str500_len} ; # Tcl can't read more than 2gb at a time, so read in 500mb chunks that are easy to verify
+ set read_data [r rawread $l]
+ set nbytes [string length $read_data]
+ if {$compare} {
+ set comp_len $nbytes
+ # Compare prefix part
+ if {$remaining == $resp_len} {
+ assert_equal $prefix [string range $read_data 0 [expr $prefix_len - 1]]
+ set read_data [string range $read_data $prefix_len $nbytes]
+ incr comp_len -$prefix_len
+ }
+ # Compare rest of data, evaluate and then assert to avoid huge print in case of failure
+ set data_equal [expr {$read_data == [string range $::str500 0 [expr $comp_len - 1]]}]
+ assert $data_equal
+ }
+ incr remaining -$nbytes
+ }
+ assert_equal [r rawread 2] "\r\n"
+ r readraw 0
+ return $resp_len
+}
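+
+# Illustrative usage (hypothetical key): validate the length of a huge GET
+# reply without ever holding the whole payload in one Tcl string:
+#
+#     assert_equal $size [read_big_bulk {r get bigkey}]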
+
+proc prepare_value {size} {
+ set _v "c"
+ for {set i 1} {$i < $size} {incr i} {
+ append _v 0
+ }
+ return $_v
+}
+
+proc memory_usage {key} {
+ set usage [r memory usage $key]
+ if {![string match {*jemalloc*} [s mem_allocator]]} {
+ # libc allocator can sometimes return a different size allocation for the same requested size
+ # this makes tests that rely on MEMORY USAGE unreliable, so instead we return a constant 1
+ set usage 1
+ }
+ return $usage
+}
+
+# forward compatibility, lmap missing in TCL 8.5
+proc lmap args {
+ set body [lindex $args end]
+ set args [lrange $args 0 end-1]
+ set n 0
+ set pairs [list]
+ foreach {varnames listval} $args {
+ set varlist [list]
+ foreach varname $varnames {
+ upvar 1 $varname var$n
+ lappend varlist var$n
+ incr n
+ }
+ lappend pairs $varlist $listval
+ }
+ set temp [list]
+ foreach {*}$pairs {
+ lappend temp [uplevel 1 $body]
+ }
+ set temp
+}
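+
+# Illustrative usage, matching the Tcl 8.6 built-in this emulates:
+#
+#     lmap x {1 2 3} {expr {$x * 2}}   ;# returns {2 4 6}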
+
+proc format_command {args} {
+ set cmd "*[llength $args]\r\n"
+ foreach a $args {
+ append cmd "$[string length $a]\r\n$a\r\n"
+ }
+ set _ $cmd
+}
+
diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl
new file mode 100644
index 0000000..21fa35d
--- /dev/null
+++ b/tests/test_helper.tcl
@@ -0,0 +1,937 @@
+# Redis test suite. Copyright (C) 2009 Salvatore Sanfilippo antirez@gmail.com
+# This software is released under the BSD License. See the COPYING file for
+# more information.
+
+package require Tcl 8.5
+
+set tcl_precision 17
+source tests/support/redis.tcl
+source tests/support/aofmanifest.tcl
+source tests/support/server.tcl
+source tests/support/cluster_util.tcl
+source tests/support/tmpfile.tcl
+source tests/support/test.tcl
+source tests/support/util.tcl
+
+set ::all_tests {
+ unit/printver
+ unit/dump
+ unit/auth
+ unit/protocol
+ unit/keyspace
+ unit/scan
+ unit/info
+ unit/info-command
+ unit/type/string
+ unit/type/incr
+ unit/type/list
+ unit/type/list-2
+ unit/type/list-3
+ unit/type/set
+ unit/type/zset
+ unit/type/hash
+ unit/type/stream
+ unit/type/stream-cgroups
+ unit/sort
+ unit/expire
+ unit/other
+ unit/multi
+ unit/quit
+ unit/aofrw
+ unit/acl
+ unit/acl-v2
+ unit/latency-monitor
+ integration/block-repl
+ integration/replication
+ integration/replication-2
+ integration/replication-3
+ integration/replication-4
+ integration/replication-psync
+ integration/replication-buffer
+ integration/shutdown
+ integration/aof
+ integration/aof-race
+ integration/aof-multi-part
+ integration/rdb
+ integration/corrupt-dump
+ integration/corrupt-dump-fuzzer
+ integration/convert-zipmap-hash-on-load
+ integration/convert-ziplist-hash-on-load
+ integration/convert-ziplist-zset-on-load
+ integration/logging
+ integration/psync2
+ integration/psync2-reg
+ integration/psync2-pingoff
+ integration/psync2-master-restart
+ integration/failover
+ integration/redis-cli
+ integration/redis-benchmark
+ integration/dismiss-mem
+ unit/pubsub
+ unit/pubsubshard
+ unit/slowlog
+ unit/scripting
+ unit/functions
+ unit/maxmemory
+ unit/introspection
+ unit/introspection-2
+ unit/limits
+ unit/obuf-limits
+ unit/bitops
+ unit/bitfield
+ unit/geo
+ unit/memefficiency
+ unit/hyperloglog
+ unit/lazyfree
+ unit/wait
+ unit/pause
+ unit/querybuf
+ unit/tls
+ unit/tracking
+ unit/oom-score-adj
+ unit/shutdown
+ unit/networking
+ unit/client-eviction
+ unit/violations
+ unit/replybufsize
+ unit/cluster/misc
+ unit/cluster/cli
+ unit/cluster/scripting
+ unit/cluster/hostnames
+ unit/cluster/human-announced-nodename
+ unit/cluster/multi-slot-operations
+ unit/cluster/slot-ownership
+ unit/cluster/links
+ unit/cluster/cluster-response-tls
+}
+# Index to the next test to run in the ::all_tests list.
+set ::next_test 0
+
+set ::host 127.0.0.1
+set ::port 6379; # port for external server
+set ::baseport 21111; # initial port for spawned redis servers
+set ::portcount 8000; # we don't want to use more than 10000 to avoid collisions with cluster bus ports
+set ::traceleaks 0
+set ::valgrind 0
+set ::durable 0
+set ::tls 0
+set ::tls_module 0
+set ::stack_logging 0
+set ::verbose 0
+set ::quiet 0
+set ::denytags {}
+set ::skiptests {}
+set ::skipunits {}
+set ::no_latency 0
+set ::allowtags {}
+set ::only_tests {}
+set ::single_tests {}
+set ::run_solo_tests {}
+set ::skip_till ""
+set ::external 0; # If "1", we are running against an external instance
+set ::file ""; # If set, runs only the tests in this comma-separated list
+set ::curfile ""; # Holds the filename of the current suite
+set ::accurate 0; # If true, runs fuzz tests with more iterations
+set ::force_failure 0
+set ::timeout 1200; # 20 minutes without progress will quit the test.
+set ::last_progress [clock seconds]
+set ::active_servers {} ; # Pids of active Redis instances.
+set ::dont_clean 0
+set ::dont_pre_clean 0
+set ::wait_server 0
+set ::stop_on_failure 0
+set ::dump_logs 0
+set ::loop 0
+set ::tlsdir "tests/tls"
+set ::singledb 0
+set ::cluster_mode 0
+set ::ignoreencoding 0
+set ::ignoredigest 0
+set ::large_memory 0
+set ::log_req_res 0
+set ::force_resp3 0
+
+# Set to 1 when we are running in client mode. The Redis test uses a
+# server-client model to run tests simultaneously. The server instance
+# runs the specified number of client instances that will actually run tests.
+# The server is responsible for showing the results to the user, and exits with
+# the appropriate exit code depending on the test outcome.
+set ::client 0
+set ::numclients 16
+
+# This function is called by one of the test clients when it receives
+# a "run" command from the server, with a filename as data.
+# It will run the specified test source file and signal it to the
+# test server when finished.
+proc execute_test_file __testname {
+ set path "tests/$__testname.tcl"
+ set ::curfile $path
+ source $path
+ send_data_packet $::test_server_fd done "$__testname"
+}
+
+# This function is called by one of the test clients when it receives
+# a "run_code" command from the server, with a verbatim test source code
+# as argument, and an associated name.
+# It will run the specified code and signal it to the test server when
+# finished.
+proc execute_test_code {__testname filename code} {
+ set ::curfile $filename
+ eval $code
+ send_data_packet $::test_server_fd done "$__testname"
+}
+
+# Set up a list to hold a stack of server configs. When calls to start_server
+# are nested, use "srv 0 pid" to get the pid of the inner server. To access
+# outer servers, use "srv -1 pid" etcetera.
+set ::servers {}
+proc srv {args} {
+ set level 0
+ if {[string is integer [lindex $args 0]]} {
+ set level [lindex $args 0]
+ set property [lindex $args 1]
+ } else {
+ set property [lindex $args 0]
+ }
+ set srv [lindex $::servers end+$level]
+ dict get $srv $property
+}
+
+# Provide easy access to the client for the inner server. It's possible to
+# prepend the argument list with a negative level to access clients for
+# servers running in outer blocks.
+proc r {args} {
+ set level 0
+ if {[string is integer [lindex $args 0]]} {
+ set level [lindex $args 0]
+ set args [lrange $args 1 end]
+ }
+ [srv $level "client"] {*}$args
+}
+
+# Returns a Redis instance by index.
+proc Rn {n} {
+ set level [expr -1*$n]
+ return [srv $level "client"]
+}
+
+# Provide easy access to a client for an inner server. Requires a positive
+# index, unlike r which uses an optional negative index.
+proc R {n args} {
+ [Rn $n] {*}$args
+}
+
+proc reconnect {args} {
+ set level [lindex $args 0]
+ if {[string length $level] == 0 || ![string is integer $level]} {
+ set level 0
+ }
+
+ set srv [lindex $::servers end+$level]
+ set host [dict get $srv "host"]
+ set port [dict get $srv "port"]
+ set config [dict get $srv "config"]
+ set client [redis $host $port 0 $::tls]
+ if {[dict exists $srv "client"]} {
+ set old [dict get $srv "client"]
+ $old close
+ }
+ dict set srv "client" $client
+
+ # select the right db when we don't have to authenticate
+ if {![dict exists $config "requirepass"] && !$::singledb} {
+ $client select 9
+ }
+
+ # re-set $srv in the servers list
+ lset ::servers end+$level $srv
+}
+
+proc redis_deferring_client {args} {
+ set level 0
+ if {[llength $args] > 0 && [string is integer [lindex $args 0]]} {
+ set level [lindex $args 0]
+ set args [lrange $args 1 end]
+ }
+
+ # create client that defers reading reply
+ set client [redis [srv $level "host"] [srv $level "port"] 1 $::tls]
+
+ # select the right db and read the response (OK)
+ if {!$::singledb} {
+ $client select 9
+ $client read
+ } else {
+ # For timing/symmetry with the above select
+ $client ping
+ $client read
+ }
+ return $client
+}
+
+proc redis_client {args} {
+ set level 0
+ if {[llength $args] > 0 && [string is integer [lindex $args 0]]} {
+ set level [lindex $args 0]
+ set args [lrange $args 1 end]
+ }
+
+    # create client that reads replies synchronously (not deferring)
+ set client [redis [srv $level "host"] [srv $level "port"] 0 $::tls]
+
+ # select the right db and read the response (OK), or at least ping
+    # the server if we're in singledb mode.
+ if {$::singledb} {
+ $client ping
+ } else {
+ $client select 9
+ }
+ return $client
+}
+
+# Provide easy access to INFO properties. Same semantic as "proc r".
+proc s {args} {
+ set level 0
+ if {[string is integer [lindex $args 0]]} {
+ set level [lindex $args 0]
+ set args [lrange $args 1 end]
+ }
+ status [srv $level "client"] [lindex $args 0]
+}
+
+# Get the specified field from the given instance's CLUSTER INFO output.
+proc CI {index field} {
+ getInfoProperty [R $index cluster info] $field
+}
+
+# Tests wrapped into run_solo are sent back from the client to the
+# test server, so that the test server will send them again to
+# clients once the clients are idle.
+proc run_solo {name code} {
+ if {$::numclients == 1 || $::loop || $::external} {
+ # run_solo is not supported in these scenarios, just run the code.
+ eval $code
+ return
+ }
+ send_data_packet $::test_server_fd run_solo [list $name $::curfile $code]
+}
+
+proc cleanup {} {
+ if {!$::quiet} {puts -nonewline "Cleanup: may take some time... "}
+ flush stdout
+ catch {exec rm -rf {*}[glob tests/tmp/redis.conf.*]}
+ catch {exec rm -rf {*}[glob tests/tmp/server.*]}
+ if {!$::quiet} {puts "OK"}
+}
+
+proc test_server_main {} {
+ if {!$::dont_pre_clean} cleanup
+ set tclsh [info nameofexecutable]
+ # Open a listening socket, trying different ports in order to find a
+ # non busy one.
+ set clientport [find_available_port [expr {$::baseport - 32}] 32]
+ if {!$::quiet} {
+ puts "Starting test server at port $clientport"
+ }
+ socket -server accept_test_clients -myaddr 127.0.0.1 $clientport
+
+ # Start the client instances
+ set ::clients_pids {}
+ if {$::external} {
+ set p [exec $tclsh [info script] {*}$::argv \
+ --client $clientport &]
+ lappend ::clients_pids $p
+ } else {
+ set start_port $::baseport
+ set port_count [expr {$::portcount / $::numclients}]
+ for {set j 0} {$j < $::numclients} {incr j} {
+ set p [exec $tclsh [info script] {*}$::argv \
+ --client $clientport --baseport $start_port --portcount $port_count &]
+ lappend ::clients_pids $p
+ incr start_port $port_count
+ }
+ }
+
+ # Setup global state for the test server
+ set ::idle_clients {}
+ set ::active_clients {}
+ array set ::active_clients_task {}
+ array set ::clients_start_time {}
+ set ::clients_time_history {}
+ set ::failed_tests {}
+
+ # Enter the event loop to handle clients I/O
+ after 100 test_server_cron
+ vwait forever
+}
+
+# This function gets called 10 times per second.
+proc test_server_cron {} {
+ set elapsed [expr {[clock seconds]-$::last_progress}]
+
+ if {$elapsed > $::timeout} {
+ set err "\[[colorstr red TIMEOUT]\]: clients state report follows."
+ puts $err
+ lappend ::failed_tests $err
+ show_clients_state
+ kill_clients
+ force_kill_all_servers
+ the_end
+ }
+
+ after 100 test_server_cron
+}
+
+proc accept_test_clients {fd addr port} {
+ fconfigure $fd -encoding binary
+ fileevent $fd readable [list read_from_test_client $fd]
+}
+
+# This is the readable handler of our test server. Clients send us messages
+# in the form of a status code and additional data. Supported
+# status types are:
+#
+# ready: the client is ready to execute the command. Only sent at client
+# startup. The server will queue the client FD in the list of idle
+# clients.
+# testing: just used to signal that a given test started.
+# ok: a test was executed with success.
+# err: a test was executed with an error.
+# skip: a test was skipped by skipfile or individual test options.
+# ignore: a test was skipped by a group tag.
+# exception: there was a runtime exception while executing the test.
+# done: the specified test file was fully processed, this test client is
+# ready to accept a new task.
+proc read_from_test_client fd {
+ set bytes [gets $fd]
+ set payload [read $fd $bytes]
+ foreach {status data elapsed} $payload break
+ set ::last_progress [clock seconds]
+
+ if {$status eq {ready}} {
+ if {!$::quiet} {
+ puts "\[$status\]: $data"
+ }
+ signal_idle_client $fd
+ } elseif {$status eq {done}} {
+ set elapsed [expr {[clock seconds]-$::clients_start_time($fd)}]
+ set all_tests_count [llength $::all_tests]
+ set running_tests_count [expr {[llength $::active_clients]-1}]
+ set completed_tests_count [expr {$::next_test-$running_tests_count}]
+ puts "\[$completed_tests_count/$all_tests_count [colorstr yellow $status]\]: $data ($elapsed seconds)"
+ lappend ::clients_time_history $elapsed $data
+ signal_idle_client $fd
+ set ::active_clients_task($fd) "(DONE) $data"
+ } elseif {$status eq {ok}} {
+ if {!$::quiet} {
+ puts "\[[colorstr green $status]\]: $data ($elapsed ms)"
+ }
+ set ::active_clients_task($fd) "(OK) $data"
+ } elseif {$status eq {skip}} {
+ if {!$::quiet} {
+ puts "\[[colorstr yellow $status]\]: $data"
+ }
+ } elseif {$status eq {ignore}} {
+ if {!$::quiet} {
+ puts "\[[colorstr cyan $status]\]: $data"
+ }
+ } elseif {$status eq {err}} {
+ set err "\[[colorstr red $status]\]: $data"
+ puts $err
+ lappend ::failed_tests $err
+ set ::active_clients_task($fd) "(ERR) $data"
+ if {$::stop_on_failure} {
+ puts -nonewline "(Test stopped, press enter to resume the tests)"
+ flush stdout
+ gets stdin
+ }
+ } elseif {$status eq {exception}} {
+ puts "\[[colorstr red $status]\]: $data"
+ kill_clients
+ force_kill_all_servers
+ exit 1
+ } elseif {$status eq {testing}} {
+ set ::active_clients_task($fd) "(IN PROGRESS) $data"
+ } elseif {$status eq {server-spawning}} {
+ set ::active_clients_task($fd) "(SPAWNING SERVER) $data"
+ } elseif {$status eq {server-spawned}} {
+ lappend ::active_servers $data
+ set ::active_clients_task($fd) "(SPAWNED SERVER) pid:$data"
+ } elseif {$status eq {server-killing}} {
+ set ::active_clients_task($fd) "(KILLING SERVER) pid:$data"
+ } elseif {$status eq {server-killed}} {
+ set ::active_servers [lsearch -all -inline -not -exact $::active_servers $data]
+ set ::active_clients_task($fd) "(KILLED SERVER) pid:$data"
+ } elseif {$status eq {run_solo}} {
+ lappend ::run_solo_tests $data
+ } else {
+ if {!$::quiet} {
+ puts "\[$status\]: $data"
+ }
+ }
+}
+
+proc show_clients_state {} {
+ # The following loop is only useful for debugging tests that may
+ # enter an infinite loop.
+ foreach x $::active_clients {
+ if {[info exist ::active_clients_task($x)]} {
+ puts "$x => $::active_clients_task($x)"
+ } else {
+ puts "$x => ???"
+ }
+ }
+}
+
+proc kill_clients {} {
+ foreach p $::clients_pids {
+ catch {exec kill $p}
+ }
+}
+
+proc force_kill_all_servers {} {
+ foreach p $::active_servers {
+ puts "Killing still running Redis server $p"
+ catch {exec kill -9 $p}
+ }
+}
+
+proc lpop {listVar {count 1}} {
+ upvar 1 $listVar l
+ set ele [lindex $l 0]
+ set l [lrange $l 1 end]
+ set ele
+}
+
+proc lremove {listVar value} {
+ upvar 1 $listVar var
+ set idx [lsearch -exact $var $value]
+ set var [lreplace $var $idx $idx]
+}
+
+# A new client is idle. Remove it from the list of active clients and
+# if there are still test units to run, launch them.
+proc signal_idle_client fd {
+ # Remove this fd from the list of active clients.
+ set ::active_clients \
+ [lsearch -all -inline -not -exact $::active_clients $fd]
+
+ # New unit to process?
+ if {$::next_test != [llength $::all_tests]} {
+ if {!$::quiet} {
+ puts [colorstr bold-white "Testing [lindex $::all_tests $::next_test]"]
+ set ::active_clients_task($fd) "ASSIGNED: $fd ([lindex $::all_tests $::next_test])"
+ }
+ set ::clients_start_time($fd) [clock seconds]
+ send_data_packet $fd run [lindex $::all_tests $::next_test]
+ lappend ::active_clients $fd
+ incr ::next_test
+ if {$::loop && $::next_test == [llength $::all_tests]} {
+ set ::next_test 0
+ incr ::loop -1
+ }
+ } elseif {[llength $::run_solo_tests] != 0 && [llength $::active_clients] == 0} {
+ if {!$::quiet} {
+ puts [colorstr bold-white "Testing solo test"]
+ set ::active_clients_task($fd) "ASSIGNED: $fd solo test"
+ }
+ set ::clients_start_time($fd) [clock seconds]
+ send_data_packet $fd run_code [lpop ::run_solo_tests]
+ lappend ::active_clients $fd
+ } else {
+ lappend ::idle_clients $fd
+ set ::active_clients_task($fd) "SLEEPING, no more units to assign"
+ if {[llength $::active_clients] == 0} {
+ the_end
+ }
+ }
+}
+
+# The the_end function gets called when all the test units have been
+# executed, so the whole test run is finished.
+proc the_end {} {
+ # TODO: print the status, exit with the right exit code.
+ puts "\n The End\n"
+ puts "Execution time of different units:"
+ foreach {time name} $::clients_time_history {
+ puts " $time seconds - $name"
+ }
+ if {[llength $::failed_tests]} {
+ puts "\n[colorstr bold-red {!!! WARNING}] The following tests failed:\n"
+ foreach failed $::failed_tests {
+ puts "*** $failed"
+ }
+ if {!$::dont_clean} cleanup
+ exit 1
+ } else {
+ puts "\n[colorstr bold-white {\o/}] [colorstr bold-green {All tests passed without errors!}]\n"
+ if {!$::dont_clean} cleanup
+ exit 0
+ }
+}
+
+# The client is not event-driven (the test server is instead) as we just need
+# to read the command, execute, reply... all this in a loop.
+proc test_client_main server_port {
+ set ::test_server_fd [socket localhost $server_port]
+ fconfigure $::test_server_fd -encoding binary
+ send_data_packet $::test_server_fd ready [pid]
+ while 1 {
+ set bytes [gets $::test_server_fd]
+ set payload [read $::test_server_fd $bytes]
+ foreach {cmd data} $payload break
+ if {$cmd eq {run}} {
+ execute_test_file $data
+ } elseif {$cmd eq {run_code}} {
+ foreach {name filename code} $data break
+ execute_test_code $name $filename $code
+ } else {
+ error "Unknown test client command: $cmd"
+ }
+ }
+}
+
+proc send_data_packet {fd status data {elapsed 0}} {
+ set payload [list $status $data $elapsed]
+ puts $fd [string length $payload]
+ puts -nonewline $fd $payload
+ flush $fd
+}
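+
+# Illustrative wire format: a packet is the payload byte count on its own
+# line, followed by the payload, which is a Tcl list of status, data and
+# elapsed time. E.g. [send_data_packet $fd ok some-test 12] writes:
+#
+#     15\nok some-test 12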
+
+proc print_help_screen {} {
+ puts [join {
+ "--valgrind Run the test over valgrind."
+ "--durable suppress test crashes and keep running"
+ "--stack-logging Enable OSX leaks/malloc stack logging."
+ "--accurate Run slow randomized tests for more iterations."
+ "--quiet Don't show individual tests."
+ "--single <unit> Just execute the specified unit (see next option). This option can be repeated."
+ "--verbose Increases verbosity."
+ "--list-tests List all the available test units."
+ "--only <test> Just execute the specified test by test name or tests that match <test> regexp (if <test> starts with '/'). This option can be repeated."
+ "--skip-till <unit> Skip all units until (and including) the specified one."
+ "--skipunit <unit> Skip one unit."
+ "--clients <num> Number of test clients (default 16)."
+ "--timeout <sec> Test timeout in seconds (default 20 min)."
+ "--force-failure Force the execution of a test that always fails."
+ "--config <k> <v> Extra config file argument."
+ "--skipfile <file> Name of a file containing test names or regexp patterns (if <test> starts with '/') that should be skipped (one per line). This option can be repeated."
+ "--skiptest <test> Test name or regexp pattern (if <test> starts with '/') to skip. This option can be repeated."
+ "--tags <tags> Run only tests having specified tags or not having '-' prefixed tags."
+ "--dont-clean Don't delete redis log files after the run."
+ "--dont-pre-clean Don't delete existing redis log files before the run."
+ "--no-latency Skip latency measurements and validation by some tests."
+ "--stop Blocks once the first test fails."
+ "--loop Execute the specified set of tests forever."
+ "--loops <count> Execute the specified set of tests several times."
+ "--wait-server Wait after server is started (so that you can attach a debugger)."
+ "--dump-logs Dump server log on test failure."
+ "--tls Run tests in TLS mode."
+ "--tls-module Run tests in TLS mode with Redis module."
+ "--host <addr> Run tests against an external host."
+ "--port <port> TCP port to use against external host."
+ "--baseport <port> Initial port number for spawned redis servers."
+ "--portcount <num> Port range for spawned redis servers."
+ "--singledb Use a single database, avoid SELECT."
+ "--cluster-mode Run tests in cluster protocol compatible mode."
+ "--ignore-encoding Don't validate object encoding."
+ "--ignore-digest Don't use debug digest validations."
+ "--large-memory Run tests using over 100mb."
+ "--help Print this help screen."
+ } "\n"]
+}
+
+# parse arguments
+for {set j 0} {$j < [llength $argv]} {incr j} {
+ set opt [lindex $argv $j]
+ set arg [lindex $argv [expr $j+1]]
+ if {$opt eq {--tags}} {
+ foreach tag $arg {
+ if {[string index $tag 0] eq "-"} {
+ lappend ::denytags [string range $tag 1 end]
+ } else {
+ lappend ::allowtags $tag
+ }
+ }
+ incr j
+ } elseif {$opt eq {--config}} {
+ set arg2 [lindex $argv [expr $j+2]]
+ lappend ::global_overrides $arg
+ lappend ::global_overrides $arg2
+ incr j 2
+ } elseif {$opt eq {--log-req-res}} {
+ set ::log_req_res 1
+ } elseif {$opt eq {--force-resp3}} {
+ set ::force_resp3 1
+ } elseif {$opt eq {--skipfile}} {
+ incr j
+ set fp [open $arg r]
+ set file_data [read $fp]
+ close $fp
+ set ::skiptests [concat $::skiptests [split $file_data "\n"]]
+ } elseif {$opt eq {--skiptest}} {
+ lappend ::skiptests $arg
+ incr j
+ } elseif {$opt eq {--valgrind}} {
+ set ::valgrind 1
+ } elseif {$opt eq {--stack-logging}} {
+ if {[string match {*Darwin*} [exec uname -a]]} {
+ set ::stack_logging 1
+ }
+ } elseif {$opt eq {--quiet}} {
+ set ::quiet 1
+ } elseif {$opt eq {--tls} || $opt eq {--tls-module}} {
+ package require tls 1.6
+ set ::tls 1
+ ::tls::init \
+ -cafile "$::tlsdir/ca.crt" \
+ -certfile "$::tlsdir/client.crt" \
+ -keyfile "$::tlsdir/client.key"
+ if {$opt eq {--tls-module}} {
+ set ::tls_module 1
+ }
+ } elseif {$opt eq {--host}} {
+ set ::external 1
+ set ::host $arg
+ incr j
+ } elseif {$opt eq {--port}} {
+ set ::port $arg
+ incr j
+ } elseif {$opt eq {--baseport}} {
+ set ::baseport $arg
+ incr j
+ } elseif {$opt eq {--portcount}} {
+ set ::portcount $arg
+ incr j
+ } elseif {$opt eq {--accurate}} {
+ set ::accurate 1
+ } elseif {$opt eq {--force-failure}} {
+ set ::force_failure 1
+ } elseif {$opt eq {--single}} {
+ lappend ::single_tests $arg
+ incr j
+ } elseif {$opt eq {--only}} {
+ lappend ::only_tests $arg
+ incr j
+ } elseif {$opt eq {--skipunit}} {
+ lappend ::skipunits $arg
+ incr j
+ } elseif {$opt eq {--skip-till}} {
+ set ::skip_till $arg
+ incr j
+ } elseif {$opt eq {--list-tests}} {
+ foreach t $::all_tests {
+ puts $t
+ }
+ exit 0
+ } elseif {$opt eq {--verbose}} {
+ incr ::verbose
+ } elseif {$opt eq {--client}} {
+ set ::client 1
+ set ::test_server_port $arg
+ incr j
+ } elseif {$opt eq {--clients}} {
+ set ::numclients $arg
+ incr j
+ } elseif {$opt eq {--durable}} {
+ set ::durable 1
+ } elseif {$opt eq {--dont-clean}} {
+ set ::dont_clean 1
+ } elseif {$opt eq {--dont-pre-clean}} {
+ set ::dont_pre_clean 1
+ } elseif {$opt eq {--no-latency}} {
+ set ::no_latency 1
+ } elseif {$opt eq {--wait-server}} {
+ set ::wait_server 1
+ } elseif {$opt eq {--dump-logs}} {
+ set ::dump_logs 1
+ } elseif {$opt eq {--stop}} {
+ set ::stop_on_failure 1
+ } elseif {$opt eq {--loop}} {
+ set ::loop 2147483647
+ } elseif {$opt eq {--loops}} {
+ set ::loop $arg
+ incr j
+ } elseif {$opt eq {--timeout}} {
+ set ::timeout $arg
+ incr j
+ } elseif {$opt eq {--singledb}} {
+ set ::singledb 1
+ } elseif {$opt eq {--cluster-mode}} {
+ set ::cluster_mode 1
+ set ::singledb 1
+ } elseif {$opt eq {--large-memory}} {
+ set ::large_memory 1
+ } elseif {$opt eq {--ignore-encoding}} {
+ set ::ignoreencoding 1
+ } elseif {$opt eq {--ignore-digest}} {
+ set ::ignoredigest 1
+ } elseif {$opt eq {--help}} {
+ print_help_screen
+ exit 0
+ } else {
+ puts "Wrong argument: $opt"
+ exit 1
+ }
+}
+
+set filtered_tests {}
+
+# Set the filtered tests to be the short list (single_tests) if it exists.
+# Otherwise, we start filtering all_tests
+if {[llength $::single_tests] > 0} {
+ set filtered_tests $::single_tests
+} else {
+ set filtered_tests $::all_tests
+}
+
+# If --skip-till option was given, we populate the list of single tests
+# to run with everything *after* the specified unit.
+if {$::skip_till != ""} {
+ set skipping 1
+ foreach t $::all_tests {
+ if {$skipping == 1} {
+ lremove filtered_tests $t
+ }
+ if {$t == $::skip_till} {
+ set skipping 0
+ }
+ }
+ if {$skipping} {
+ puts "test $::skip_till not found"
+ exit 0
+ }
+}
+
+# If --skipunit options were given, we populate the list of single tests
+# to run with everything *not* in the skipunits list.
+if {[llength $::skipunits] > 0} {
+ foreach t $::all_tests {
+ if {[lsearch $::skipunits $t] != -1} {
+ lremove filtered_tests $t
+ }
+ }
+}
+
+# Override the list of tests with the specific tests we want to run
+# in case some filter was applied, that is, the --single, --skipunit or --skip-till options.
+if {[llength $filtered_tests] < [llength $::all_tests]} {
+ set ::all_tests $filtered_tests
+}
+
+proc attach_to_replication_stream_on_connection {conn} {
+ r config set repl-ping-replica-period 3600
+ if {$::tls} {
+ set s [::tls::socket [srv $conn "host"] [srv $conn "port"]]
+ } else {
+ set s [socket [srv $conn "host"] [srv $conn "port"]]
+ }
+ fconfigure $s -translation binary
+ puts -nonewline $s "SYNC\r\n"
+ flush $s
+
+ # Get the count
+ while 1 {
+ set count [gets $s]
+ set prefix [string range $count 0 0]
+ if {$prefix ne {}} break; # Newlines are allowed as PINGs.
+ }
+ if {$prefix ne {$}} {
+ error "attach_to_replication_stream error. Received '$count' as count."
+ }
+ set count [string range $count 1 end]
+
+ # Consume the bulk payload
+ while {$count} {
+ set buf [read $s $count]
+ set count [expr {$count-[string length $buf]}]
+ }
+ return $s
+}
+
+proc attach_to_replication_stream {} {
+ return [attach_to_replication_stream_on_connection 0]
+}
+
+proc read_from_replication_stream {s} {
+ fconfigure $s -blocking 0
+ set attempt 0
+ while {[gets $s count] == -1} {
+ if {[incr attempt] == 10} return ""
+ after 100
+ }
+ fconfigure $s -blocking 1
+ set count [string range $count 1 end]
+
+ # Return a list of arguments for the command.
+ set res {}
+ for {set j 0} {$j < $count} {incr j} {
+ read $s 1
+ set arg [::redis::redis_bulk_read $s]
+ if {$j == 0} {set arg [string tolower $arg]}
+ lappend res $arg
+ }
+ return $res
+}
+
+proc assert_replication_stream {s patterns} {
+ set errors 0
+ set values_list {}
+ set patterns_list {}
+ for {set j 0} {$j < [llength $patterns]} {incr j} {
+ set pattern [lindex $patterns $j]
+ lappend patterns_list $pattern
+ set value [read_from_replication_stream $s]
+ lappend values_list $value
+ if {![string match $pattern $value]} { incr errors }
+ }
+
+ if {$errors == 0} { return }
+
+ set context [info frame -1]
+ close_replication_stream $s ;# for fast exit
+ assert_match $patterns_list $values_list "" $context
+}
+
+proc close_replication_stream {s} {
+ close $s
+ r config set repl-ping-replica-period 10
+ return
+}
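+
+# Illustrative test pattern (hypothetical command) tying these helpers
+# together: attach, issue a write, assert on what the master propagated,
+# then detach:
+#
+#     set repl [attach_to_replication_stream]
+#     r set foo bar
+#     assert_replication_stream $repl {
+#         {select *}
+#         {set foo bar}
+#     }
+#     close_replication_stream $repl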
+
+# With the parallel test running multiple Redis instances at the same time
+# we need a fast enough computer, otherwise a lot of tests may generate
+# false positives.
+# If the computer is too slow, we revert to sequential testing without any
+# parallelism, that is, clients == 1.
+proc is_a_slow_computer {} {
+ set start [clock milliseconds]
+ for {set j 0} {$j < 1000000} {incr j} {}
+ set elapsed [expr [clock milliseconds]-$start]
+ expr {$elapsed > 200}
+}
+
+if {$::client} {
+ if {[catch { test_client_main $::test_server_port } err]} {
+ set estr "Executing test client: $err.\n$::errorInfo"
+ if {[catch {send_data_packet $::test_server_fd exception $estr}]} {
+ puts $estr
+ }
+ exit 1
+ }
+} else {
+ if {[is_a_slow_computer]} {
+ puts "** SLOW COMPUTER ** Using a single client to avoid false positives."
+ set ::numclients 1
+ }
+
+ if {[catch { test_server_main } err]} {
+ if {[string length $err] > 0} {
+ # only display error when not generated by the test suite
+ if {$err ne "exception"} {
+ puts $::errorInfo
+ }
+ exit 1
+ }
+ }
+}
diff --git a/tests/tmp/.gitignore b/tests/tmp/.gitignore
new file mode 100644
index 0000000..72e8ffc
--- /dev/null
+++ b/tests/tmp/.gitignore
@@ -0,0 +1 @@
+*
diff --git a/tests/unit/acl-v2.tcl b/tests/unit/acl-v2.tcl
new file mode 100644
index 0000000..b259c27
--- /dev/null
+++ b/tests/unit/acl-v2.tcl
@@ -0,0 +1,525 @@
+start_server {tags {"acl external:skip"}} {
+ set r2 [redis_client]
+ test {Test basic multiple selectors} {
+ r ACL SETUSER selector-1 on -@all resetkeys nopass
+ $r2 auth selector-1 password
+ catch {$r2 ping} err
+ assert_match "*NOPERM*command*" $err
+ catch {$r2 set write::foo bar} err
+ assert_match "*NOPERM*command*" $err
+ catch {$r2 get read::foo} err
+ assert_match "*NOPERM*command*" $err
+
+ r ACL SETUSER selector-1 (+@write ~write::*) (+@read ~read::*)
+ catch {$r2 ping} err
+ assert_equal "OK" [$r2 set write::foo bar]
+ assert_equal "" [$r2 get read::foo]
+ catch {$r2 get write::foo} err
+ assert_match "*NOPERM*key*" $err
+ catch {$r2 set read::foo bar} err
+ assert_match "*NOPERM*key*" $err
+ }
+
+ test {Test ACL selectors by default have no permissions} {
+ r ACL SETUSER selector-default reset ()
+ set user [r ACL GETUSER "selector-default"]
+ assert_equal 1 [llength [dict get $user selectors]]
+ assert_equal "" [dict get [lindex [dict get $user selectors] 0] keys]
+ assert_equal "" [dict get [lindex [dict get $user selectors] 0] channels]
+ assert_equal "-@all" [dict get [lindex [dict get $user selectors] 0] commands]
+ }
+
+ test {Test deleting selectors} {
+ r ACL SETUSER selector-del on "(~added-selector)"
+ set user [r ACL GETUSER "selector-del"]
+ assert_equal "~added-selector" [dict get [lindex [dict get $user selectors] 0] keys]
+ assert_equal [llength [dict get $user selectors]] 1
+
+ r ACL SETUSER selector-del clearselectors
+ set user [r ACL GETUSER "selector-del"]
+ assert_equal [llength [dict get $user selectors]] 0
+ }
+
+ test {Test selector syntax error reports the error in the selector context} {
+ catch {r ACL SETUSER selector-syntax on (this-is-invalid)} e
+ assert_match "*ERR Error in ACL SETUSER modifier '(*)*Syntax*" $e
+
+ catch {r ACL SETUSER selector-syntax on (&* &fail)} e
+ assert_match "*ERR Error in ACL SETUSER modifier '(*)*Adding a pattern after the*" $e
+
+ catch {r ACL SETUSER selector-syntax on (+PING (+SELECT (+DEL} e
+ assert_match "*ERR Unmatched parenthesis in acl selector*" $e
+
+ catch {r ACL SETUSER selector-syntax on (+PING (+SELECT (+DEL ) ) ) } e
+ assert_match "*ERR Error in ACL SETUSER modifier*" $e
+
+ catch {r ACL SETUSER selector-syntax on (+PING (+SELECT (+DEL ) } e
+ assert_match "*ERR Error in ACL SETUSER modifier*" $e
+
+ assert_equal "" [r ACL GETUSER selector-syntax]
+ }
+
+ test {Test flexible selector definition} {
+ # Test valid selectors
+ r ACL SETUSER selector-2 "(~key1 +get )" "( ~key2 +get )" "( ~key3 +get)" "(~key4 +get)"
+ r ACL SETUSER selector-2 (~key5 +get ) ( ~key6 +get ) ( ~key7 +get) (~key8 +get)
+ set user [r ACL GETUSER "selector-2"]
+ assert_equal "~key1" [dict get [lindex [dict get $user selectors] 0] keys]
+ assert_equal "~key2" [dict get [lindex [dict get $user selectors] 1] keys]
+ assert_equal "~key3" [dict get [lindex [dict get $user selectors] 2] keys]
+ assert_equal "~key4" [dict get [lindex [dict get $user selectors] 3] keys]
+ assert_equal "~key5" [dict get [lindex [dict get $user selectors] 4] keys]
+ assert_equal "~key6" [dict get [lindex [dict get $user selectors] 5] keys]
+ assert_equal "~key7" [dict get [lindex [dict get $user selectors] 6] keys]
+ assert_equal "~key8" [dict get [lindex [dict get $user selectors] 7] keys]
+
+ # Test invalid selector syntax
+ catch {r ACL SETUSER invalid-selector " () "} err
+ assert_match "*ERR*Syntax error*" $err
+ catch {r ACL SETUSER invalid-selector (} err
+ assert_match "*Unmatched parenthesis*" $err
+ catch {r ACL SETUSER invalid-selector )} err
+ assert_match "*ERR*Syntax error" $err
+ }
+
+ test {Test separate read permission} {
+ r ACL SETUSER key-permission-R on nopass %R~read* +@all
+ $r2 auth key-permission-R password
+ assert_equal PONG [$r2 PING]
+ r set readstr bar
+ assert_equal bar [$r2 get readstr]
+ catch {$r2 set readstr bar} err
+ assert_match "*NOPERM*key*" $err
+ catch {$r2 get notread} err
+ assert_match "*NOPERM*key*" $err
+ }
+
+ test {Test separate write permission} {
+ r ACL SETUSER key-permission-W on nopass %W~write* +@all
+ $r2 auth key-permission-W password
+ assert_equal PONG [$r2 PING]
+        # Note: SET is an RW command, so it's not used for testing
+ $r2 LPUSH writelist 10
+ catch {$r2 GET writestr} err
+ assert_match "*NOPERM*key*" $err
+ catch {$r2 LPUSH notwrite 10} err
+ assert_match "*NOPERM*key*" $err
+ }
+
+ test {Test separate read and write permissions} {
+ r ACL SETUSER key-permission-RW on nopass %R~read* %W~write* +@all
+ $r2 auth key-permission-RW password
+ assert_equal PONG [$r2 PING]
+ r set read bar
+ $r2 copy read write
+ catch {$r2 copy write read} err
+ assert_match "*NOPERM*key*" $err
+ }
+
+ test {Test separate read and write permissions on different selectors are not additive} {
+ r ACL SETUSER key-permission-RW-selector on nopass "(%R~read* +@all)" "(%W~write* +@all)"
+ $r2 auth key-permission-RW-selector password
+ assert_equal PONG [$r2 PING]
+
+ # Verify write selector
+ $r2 LPUSH writelist 10
+ catch {$r2 GET writestr} err
+ assert_match "*NOPERM*key*" $err
+ catch {$r2 LPUSH notwrite 10} err
+ assert_match "*NOPERM*key*" $err
+
+ # Verify read selector
+ r set readstr bar
+ assert_equal bar [$r2 get readstr]
+ catch {$r2 set readstr bar} err
+ assert_match "*NOPERM*key*" $err
+ catch {$r2 get notread} err
+ assert_match "*NOPERM*key*" $err
+
+ # Verify they don't combine
+ catch {$r2 copy read write} err
+ assert_match "*NOPERM*key*" $err
+ catch {$r2 copy write read} err
+ assert_match "*NOPERM*key*" $err
+ }
+
+ test {Test SET with separate read permission} {
+ r del readstr
+ r ACL SETUSER set-key-permission-R on nopass %R~read* +@all
+ $r2 auth set-key-permission-R password
+ assert_equal PONG [$r2 PING]
+ assert_equal {} [$r2 get readstr]
+
+        # We don't have the permission to WRITE the key.
+ assert_error {*NOPERM*key*} {$r2 set readstr bar}
+ assert_error {*NOPERM*key*} {$r2 set readstr bar get}
+ assert_error {*NOPERM*key*} {$r2 set readstr bar ex 100}
+ assert_error {*NOPERM*key*} {$r2 set readstr bar keepttl nx}
+ }
+
+ test {Test SET with separate write permission} {
+ r del writestr
+ r ACL SETUSER set-key-permission-W on nopass %W~write* +@all
+ $r2 auth set-key-permission-W password
+ assert_equal PONG [$r2 PING]
+ assert_equal {OK} [$r2 set writestr bar]
+ assert_equal {OK} [$r2 set writestr get]
+
+        # We don't have the permission to READ the key.
+ assert_error {*NOPERM*key*} {$r2 set get writestr}
+ assert_error {*NOPERM*key*} {$r2 set writestr bar get}
+ assert_error {*NOPERM*key*} {$r2 set writestr bar get ex 100}
+ assert_error {*NOPERM*key*} {$r2 set writestr bar get keepttl nx}
+
+ # this probably should be `ERR value is not an integer or out of range`
+ assert_error {*NOPERM*key*} {$r2 set writestr bar ex get}
+ }
+
+ test {Test SET with read and write permissions} {
+ r del readwrite_str
+ r ACL SETUSER set-key-permission-RW-selector on nopass %RW~readwrite* +@all
+ $r2 auth set-key-permission-RW-selector password
+ assert_equal PONG [$r2 PING]
+
+ assert_equal {} [$r2 get readwrite_str]
+ assert_error {ERR * not an integer *} {$r2 set readwrite_str bar ex get}
+
+ assert_equal {OK} [$r2 set readwrite_str bar]
+ assert_equal {bar} [$r2 get readwrite_str]
+
+ assert_equal {bar} [$r2 set readwrite_str bar2 get]
+ assert_equal {bar2} [$r2 get readwrite_str]
+
+ assert_equal {bar2} [$r2 set readwrite_str bar3 get ex 10]
+ assert_equal {bar3} [$r2 get readwrite_str]
+ assert_range [$r2 ttl readwrite_str] 5 10
+ }
+
+ test {Test BITFIELD with separate read permission} {
+ r del readstr
+ r ACL SETUSER bitfield-key-permission-R on nopass %R~read* +@all
+ $r2 auth bitfield-key-permission-R password
+ assert_equal PONG [$r2 PING]
+ assert_equal {0} [$r2 bitfield readstr get u4 0]
+
+        # We don't have the permission to WRITE the key.
+ assert_error {*NOPERM*key*} {$r2 bitfield readstr set u4 0 1}
+ assert_error {*NOPERM*key*} {$r2 bitfield readstr get u4 0 set u4 0 1}
+ assert_error {*NOPERM*key*} {$r2 bitfield readstr incrby u4 0 1}
+ }
+
+ test {Test BITFIELD with separate write permission} {
+ r del writestr
+ r ACL SETUSER bitfield-key-permission-W on nopass %W~write* +@all
+ $r2 auth bitfield-key-permission-W password
+ assert_equal PONG [$r2 PING]
+
+        # We don't have the permission to READ the key.
+ assert_error {*NOPERM*key*} {$r2 bitfield writestr get u4 0}
+ assert_error {*NOPERM*key*} {$r2 bitfield writestr set u4 0 1}
+ assert_error {*NOPERM*key*} {$r2 bitfield writestr incrby u4 0 1}
+ }
+
+ test {Test BITFIELD with read and write permissions} {
+ r del readwrite_str
+ r ACL SETUSER bitfield-key-permission-RW-selector on nopass %RW~readwrite* +@all
+ $r2 auth bitfield-key-permission-RW-selector password
+ assert_equal PONG [$r2 PING]
+
+ assert_equal {0} [$r2 bitfield readwrite_str get u4 0]
+ assert_equal {0} [$r2 bitfield readwrite_str set u4 0 1]
+ assert_equal {2} [$r2 bitfield readwrite_str incrby u4 0 1]
+ assert_equal {2} [$r2 bitfield readwrite_str get u4 0]
+ }
+
+ test {Test ACL log correctly identifies the relevant item when selectors are used} {
+ r ACL SETUSER acl-log-test-selector on nopass
+ r ACL SETUSER acl-log-test-selector +mget ~key (+mget ~key ~otherkey)
+ $r2 auth acl-log-test-selector password
+
+ # Test that command is shown only if none of the selectors match
+ r ACL LOG RESET
+ catch {$r2 GET key} err
+ assert_match "*NOPERM*command*" $err
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] "acl-log-test-selector"
+ assert_equal [dict get $entry context] "toplevel"
+ assert_equal [dict get $entry reason] "command"
+ assert_equal [dict get $entry object] "get"
+
+ # Test two cases where the first selector matches less than the
+ # second selector. We should still show the logically first unmatched key.
+ r ACL LOG RESET
+ catch {$r2 MGET otherkey someotherkey} err
+ assert_match "*NOPERM*key*" $err
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] "acl-log-test-selector"
+ assert_equal [dict get $entry context] "toplevel"
+ assert_equal [dict get $entry reason] "key"
+ assert_equal [dict get $entry object] "someotherkey"
+
+ r ACL LOG RESET
+ catch {$r2 MGET key otherkey someotherkey} err
+ assert_match "*NOPERM*key*" $err
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] "acl-log-test-selector"
+ assert_equal [dict get $entry context] "toplevel"
+ assert_equal [dict get $entry reason] "key"
+ assert_equal [dict get $entry object] "someotherkey"
+ }
+
+ test {Test ACL GETUSER response information} {
+ r ACL setuser selector-info -@all +get resetchannels &channel1 %R~foo1 %W~bar1 ~baz1
+ r ACL setuser selector-info (-@all +set resetchannels &channel2 %R~foo2 %W~bar2 ~baz2)
+ set user [r ACL GETUSER "selector-info"]
+
+ # Root selector
+ assert_equal "%R~foo1 %W~bar1 ~baz1" [dict get $user keys]
+ assert_equal "&channel1" [dict get $user channels]
+ assert_equal "-@all +get" [dict get $user commands]
+
+ # Added selector
+ set secondary_selector [lindex [dict get $user selectors] 0]
+ assert_equal "%R~foo2 %W~bar2 ~baz2" [dict get $secondary_selector keys]
+ assert_equal "&channel2" [dict get $secondary_selector channels]
+ assert_equal "-@all +set" [dict get $secondary_selector commands]
+ }
+
+ test {Test ACL list idempotency} {
+ r ACL SETUSER user-idempotency off -@all +get resetchannels &channel1 %R~foo1 %W~bar1 ~baz1 (-@all +set resetchannels &channel2 %R~foo2 %W~bar2 ~baz2)
+ set response [lindex [r ACL LIST] [lsearch [r ACL LIST] "user user-idempotency*"]]
+
+ assert_match "*-@all*+get*(*)*" $response
+ assert_match "*resetchannels*&channel1*(*)*" $response
+ assert_match "*%R~foo1*%W~bar1*~baz1*(*)*" $response
+
+ assert_match "*(*-@all*+set*)*" $response
+ assert_match "*(*resetchannels*&channel2*)*" $response
+ assert_match "*(*%R~foo2*%W~bar2*~baz2*)*" $response
+ }
+
+ test {Test R+W is the same as all permissions} {
+ r ACL setuser selector-rw-info %R~foo %W~foo %RW~bar
+ set user [r ACL GETUSER selector-rw-info]
+ assert_equal "~foo ~bar" [dict get $user keys]
+ }
+
+ test {Test basic dry run functionality} {
+ r ACL setuser command-test +@all %R~read* %W~write* %RW~rw*
+ assert_equal "OK" [r ACL DRYRUN command-test GET read]
+
+ catch {r ACL DRYRUN not-a-user GET read} e
+ assert_equal "ERR User 'not-a-user' not found" $e
+
+ catch {r ACL DRYRUN command-test not-a-command read} e
+ assert_equal "ERR Command 'not-a-command' not found" $e
+ }
+
+ test {Test various commands for command permissions} {
+ r ACL setuser command-test -@all
+ assert_match {*has no permissions to run the 'set' command*} [r ACL DRYRUN command-test set somekey somevalue]
+ assert_match {*has no permissions to run the 'get' command*} [r ACL DRYRUN command-test get somekey]
+ }
+
+ test {Test various odd commands for key permissions} {
+ r ACL setuser command-test +@all %R~read* %W~write* %RW~rw*
+
+        # Test MIGRATE, which is marked with incomplete keys
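+        # MIGRATE's key may be given inline or via the KEYS option (optionally
+        # after AUTH/AUTH2 arguments); each form must honor key permissions.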
+ assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever rw 0 500]
+ assert_match {*has no permissions to access the 'read' key*} [r ACL DRYRUN command-test MIGRATE whatever whatever read 0 500]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN command-test MIGRATE whatever whatever write 0 500]
+ assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 KEYS rw]
+ assert_match "*has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 KEYS read]
+ assert_match "*has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 KEYS write]
+ assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH KEYS KEYS rw]
+ assert_match "*has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH KEYS KEYS read]
+ assert_match "*has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH KEYS KEYS write]
+ assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH2 KEYS 123 KEYS rw]
+ assert_match "*has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH2 KEYS 123 KEYS read]
+ assert_match "*has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH2 KEYS 123 KEYS write]
+ assert_equal "OK" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH2 USER KEYS KEYS rw]
+ assert_match "*has no permissions to access the 'read' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH2 USER KEYS KEYS read]
+ assert_match "*has no permissions to access the 'write' key" [r ACL DRYRUN command-test MIGRATE whatever whatever "" 0 5000 AUTH2 USER KEYS KEYS write]
+
+ # Test SORT, which is marked with incomplete keys
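+        # The source key needs read permission and the STORE destination
+        # needs write permission.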
+ assert_equal "OK" [r ACL DRYRUN command-test SORT read STORE write]
+ assert_match {*has no permissions to access the 'read' key*} [r ACL DRYRUN command-test SORT read STORE read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN command-test SORT write STORE write]
+
+ # Test EVAL, which uses the numkey keyspec (Also test EVAL_RO)
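+        # The numeric argument declares how many of the following arguments
+        # are key names.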
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL "" 1 rw1]
+ assert_match {*has no permissions to access the 'read' key*} [r ACL DRYRUN command-test EVAL "" 1 read]
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL_RO "" 1 rw1]
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL_RO "" 1 read]
+
+        # 'read' is an optional argument and not a key here; make sure we don't treat it as a key
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL "" 0 read]
+
+        # These are syntax errors, but they're 'OK' from an ACL perspective
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL "" -1 read]
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL "" 3 rw rw]
+ assert_equal "OK" [r ACL DRYRUN command-test EVAL "" 3 rw read]
+
+        # Test GEORADIUS, which uses the last type of keyspec: keyword
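+        # STORE/STOREDIST name a destination key via a keyword, so the
+        # destination needs write permission while the source needs read.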
+ assert_equal "OK" [r ACL DRYRUN command-test GEORADIUS read longitude latitude radius M STOREDIST write]
+ assert_equal "OK" [r ACL DRYRUN command-test GEORADIUS read longitude latitude radius M]
+ assert_match {*has no permissions to access the 'read2' key*} [r ACL DRYRUN command-test GEORADIUS read1 longitude latitude radius M STOREDIST read2]
+ assert_match {*has no permissions to access the 'write1' key*} [r ACL DRYRUN command-test GEORADIUS write1 longitude latitude radius M STOREDIST write2]
+ assert_equal "OK" [r ACL DRYRUN command-test GEORADIUS read longitude latitude radius M STORE write]
+ assert_equal "OK" [r ACL DRYRUN command-test GEORADIUS read longitude latitude radius M]
+ assert_match {*has no permissions to access the 'read2' key*} [r ACL DRYRUN command-test GEORADIUS read1 longitude latitude radius M STORE read2]
+ assert_match {*has no permissions to access the 'write1' key*} [r ACL DRYRUN command-test GEORADIUS write1 longitude latitude radius M STORE write2]
+ }
+
+    # Existence test commands are not marked as access commands since they are
+    # typically issued alongside write commands. We therefore make the claim that
+    # they can be executed when either READ or WRITE flags are provided.
+ test {Existence test commands are not marked as access} {
+ assert_equal "OK" [r ACL DRYRUN command-test HEXISTS read foo]
+ assert_equal "OK" [r ACL DRYRUN command-test HEXISTS write foo]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test HEXISTS nothing foo]
+
+ assert_equal "OK" [r ACL DRYRUN command-test HSTRLEN read foo]
+ assert_equal "OK" [r ACL DRYRUN command-test HSTRLEN write foo]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test HSTRLEN nothing foo]
+
+ assert_equal "OK" [r ACL DRYRUN command-test SISMEMBER read foo]
+ assert_equal "OK" [r ACL DRYRUN command-test SISMEMBER write foo]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test SISMEMBER nothing foo]
+ }
+
+    # Unlike existence test commands, intersection cardinality commands process
+    # the data between keys and return an aggregated cardinality. Therefore they
+    # carry the access requirement.
+    test {Intersection cardinality commands are access commands} {
+ assert_equal "OK" [r ACL DRYRUN command-test SINTERCARD 2 read read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN command-test SINTERCARD 2 write read]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test SINTERCARD 2 nothing read]
+
+ assert_equal "OK" [r ACL DRYRUN command-test ZCOUNT read 0 1]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN command-test ZCOUNT write 0 1]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test ZCOUNT nothing 0 1]
+
+ assert_equal "OK" [r ACL DRYRUN command-test PFCOUNT read read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN command-test PFCOUNT write read]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test PFCOUNT nothing read]
+
+ assert_equal "OK" [r ACL DRYRUN command-test ZINTERCARD 2 read read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN command-test ZINTERCARD 2 write read]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test ZINTERCARD 2 nothing read]
+ }
+
+ test {Test general keyspace commands require some type of permission to execute} {
+ assert_equal "OK" [r ACL DRYRUN command-test touch read]
+ assert_equal "OK" [r ACL DRYRUN command-test touch write]
+ assert_equal "OK" [r ACL DRYRUN command-test touch rw]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test touch nothing]
+
+ assert_equal "OK" [r ACL DRYRUN command-test exists read]
+ assert_equal "OK" [r ACL DRYRUN command-test exists write]
+ assert_equal "OK" [r ACL DRYRUN command-test exists rw]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test exists nothing]
+
+ assert_equal "OK" [r ACL DRYRUN command-test MEMORY USAGE read]
+ assert_equal "OK" [r ACL DRYRUN command-test MEMORY USAGE write]
+ assert_equal "OK" [r ACL DRYRUN command-test MEMORY USAGE rw]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test MEMORY USAGE nothing]
+
+ assert_equal "OK" [r ACL DRYRUN command-test TYPE read]
+ assert_equal "OK" [r ACL DRYRUN command-test TYPE write]
+ assert_equal "OK" [r ACL DRYRUN command-test TYPE rw]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test TYPE nothing]
+ }
+
+ test {Cardinality commands require some type of permission to execute} {
+ set commands {STRLEN HLEN LLEN SCARD ZCARD XLEN}
+ foreach command $commands {
+ assert_equal "OK" [r ACL DRYRUN command-test $command read]
+ assert_equal "OK" [r ACL DRYRUN command-test $command write]
+ assert_equal "OK" [r ACL DRYRUN command-test $command rw]
+ assert_match {*has no permissions to access the 'nothing' key*} [r ACL DRYRUN command-test $command nothing]
+ }
+ }
+
+ test {Test sharded channel permissions} {
+ r ACL setuser test-channels +@all resetchannels &channel
+ assert_equal "OK" [r ACL DRYRUN test-channels spublish channel foo]
+ assert_equal "OK" [r ACL DRYRUN test-channels ssubscribe channel]
+ assert_equal "OK" [r ACL DRYRUN test-channels sunsubscribe]
+ assert_equal "OK" [r ACL DRYRUN test-channels sunsubscribe channel]
+ assert_equal "OK" [r ACL DRYRUN test-channels sunsubscribe otherchannel]
+
+ assert_match {*has no permissions to access the 'otherchannel' channel*} [r ACL DRYRUN test-channels spublish otherchannel foo]
+ assert_match {*has no permissions to access the 'otherchannel' channel*} [r ACL DRYRUN test-channels ssubscribe otherchannel foo]
+ }
+
+ test {Test sort with ACL permissions} {
+ r set v1 1
+ r lpush mylist 1
+
+ r ACL setuser test-sort-acl on nopass (+sort ~mylist)
+ $r2 auth test-sort-acl nopass
+
+ catch {$r2 sort mylist by v*} e
+ assert_equal "ERR BY option of SORT denied due to insufficient ACL permissions." $e
+ catch {$r2 sort mylist get v*} e
+ assert_equal "ERR GET option of SORT denied due to insufficient ACL permissions." $e
+
+ r ACL setuser test-sort-acl (+sort ~mylist ~v*)
+ catch {$r2 sort mylist by v*} e
+ assert_equal "ERR BY option of SORT denied due to insufficient ACL permissions." $e
+ catch {$r2 sort mylist get v*} e
+ assert_equal "ERR GET option of SORT denied due to insufficient ACL permissions." $e
+
+ r ACL setuser test-sort-acl (+sort ~mylist %W~*)
+ catch {$r2 sort mylist by v*} e
+ assert_equal "ERR BY option of SORT denied due to insufficient ACL permissions." $e
+ catch {$r2 sort mylist get v*} e
+ assert_equal "ERR GET option of SORT denied due to insufficient ACL permissions." $e
+
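+        # BY/GET patterns may touch arbitrary keys, so they require read
+        # access to the full keyspace.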
+ r ACL setuser test-sort-acl (+sort ~mylist %R~*)
+ assert_equal "1" [$r2 sort mylist by v*]
+
+ # cleanup
+ r ACL deluser test-sort-acl
+ r del v1 mylist
+ }
+
+ test {Test DRYRUN with wrong number of arguments} {
+ r ACL setuser test-dry-run +@all ~v*
+
+ assert_equal "OK" [r ACL DRYRUN test-dry-run SET v v]
+
+ catch {r ACL DRYRUN test-dry-run SET v} e
+ assert_equal "ERR wrong number of arguments for 'set' command" $e
+
+ catch {r ACL DRYRUN test-dry-run SET} e
+ assert_equal "ERR wrong number of arguments for 'set' command" $e
+ }
+
+ $r2 close
+}
+
+set server_path [tmpdir "selectors.acl"]
+exec cp -f tests/assets/userwithselectors.acl $server_path
+exec cp -f tests/assets/default.conf $server_path
+start_server [list overrides [list "dir" $server_path "aclfile" "userwithselectors.acl"] tags [list "external:skip"]] {
+
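+    # For reference, the assertions below imply that userwithselectors.acl
+    # defines selectors roughly like this (inferred from the test, not
+    # copied from the asset file):
+    #   user alice on (-@all +get ~rw*)
+    #   user bob on (-@all +set %W~w*) (-@all +get %R~r*)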
+ test {Test behavior of loading ACLs} {
+ set selectors [dict get [r ACL getuser alice] selectors]
+ assert_equal [llength $selectors] 1
+ set test_selector [lindex $selectors 0]
+ assert_equal "-@all +get" [dict get $test_selector "commands"]
+ assert_equal "~rw*" [dict get $test_selector "keys"]
+
+ set selectors [dict get [r ACL getuser bob] selectors]
+ assert_equal [llength $selectors] 2
+ set test_selector [lindex $selectors 0]
+ assert_equal "-@all +set" [dict get $test_selector "commands"]
+ assert_equal "%W~w*" [dict get $test_selector "keys"]
+
+ set test_selector [lindex $selectors 1]
+ assert_equal "-@all +get" [dict get $test_selector "commands"]
+ assert_equal "%R~r*" [dict get $test_selector "keys"]
+ }
+}
diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl
new file mode 100644
index 0000000..36ef063
--- /dev/null
+++ b/tests/unit/acl.tcl
@@ -0,0 +1,1173 @@
+start_server {tags {"acl external:skip"}} {
+ test {Connections start with the default user} {
+ r ACL WHOAMI
+ } {default}
+
+ test {It is possible to create new users} {
+ r ACL setuser newuser
+ }
+
+ test {Coverage: ACL USERS} {
+ r ACL USERS
+ } {default newuser}
+
+ test {Usernames can not contain spaces or null characters} {
+ catch {r ACL setuser "a a"} err
+ set err
+ } {*Usernames can't contain spaces or null characters*}
+
+ test {New users start disabled} {
+ r ACL setuser newuser >passwd1
+ catch {r AUTH newuser passwd1} err
+ set err
+ } {*WRONGPASS*}
+
+ test {Enabling the user allows the login} {
+ r ACL setuser newuser on +acl
+ r AUTH newuser passwd1
+ r ACL WHOAMI
+ } {newuser}
+
+ test {Only the set of correct passwords work} {
+ r ACL setuser newuser >passwd2
+ catch {r AUTH newuser passwd1} e
+ assert {$e eq "OK"}
+ catch {r AUTH newuser passwd2} e
+ assert {$e eq "OK"}
+ catch {r AUTH newuser passwd3} e
+ set e
+ } {*WRONGPASS*}
+
+ test {It is possible to remove passwords from the set of valid ones} {
+ r ACL setuser newuser <passwd1
+ catch {r AUTH newuser passwd1} e
+ set e
+ } {*WRONGPASS*}
+
+ test {Test password hashes can be added} {
+ r ACL setuser newuser #34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6
+ catch {r AUTH newuser passwd4} e
+ assert {$e eq "OK"}
+ }
+
+ test {Test password hashes validate input} {
+ # Validate Length
+ catch {r ACL setuser newuser #34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e} e
+ # Validate character outside set
+ catch {r ACL setuser newuser #34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4eq} e
+ set e
+ } {*Error in ACL SETUSER modifier*}
+
+ test {ACL GETUSER returns the password hash instead of the actual password} {
+ set passstr [dict get [r ACL getuser newuser] passwords]
+ assert_match {*34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6*} $passstr
+ assert_no_match {*passwd4*} $passstr
+ }
+
+ test {Test hashed passwords removal} {
+ r ACL setuser newuser !34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6
+ set passstr [dict get [r ACL getuser newuser] passwords]
+ assert_no_match {*34344e4d60c2b6d639b7bd22e18f2b0b91bc34bf0ac5f9952744435093cfb4e6*} $passstr
+ }
+
+ test {By default users are not able to access any command} {
+ catch {r SET foo bar} e
+ set e
+ } {*NOPERM*set*}
+
+ test {By default users are not able to access any key} {
+ r ACL setuser newuser +set
+ catch {r SET foo bar} e
+ set e
+ } {*NOPERM*key*}
+
+    test {It's possible to allow access to a subset of keys} {
+ r ACL setuser newuser allcommands ~foo:* ~bar:*
+ r SET foo:1 a
+ r SET bar:2 b
+ catch {r SET zap:3 c} e
+ r ACL setuser newuser allkeys; # Undo keys ACL
+ set e
+ } {*NOPERM*key*}
+
+ test {By default, only default user is able to publish to any channel} {
+ r AUTH default pwd
+ r PUBLISH foo bar
+ r ACL setuser psuser on >pspass +acl +client +@pubsub
+ r AUTH psuser pspass
+ catch {r PUBLISH foo bar} e
+ set e
+ } {*NOPERM*channel*}
+
+    test {By default, only default user is able to publish to any shard channel} {
+ r AUTH default pwd
+ r SPUBLISH foo bar
+ r AUTH psuser pspass
+ catch {r SPUBLISH foo bar} e
+ set e
+ } {*NOPERM*channel*}
+
+ test {By default, only default user is able to subscribe to any channel} {
+ set rd [redis_deferring_client]
+ $rd AUTH default pwd
+ $rd read
+ $rd SUBSCRIBE foo
+ assert_match {subscribe foo 1} [$rd read]
+ $rd UNSUBSCRIBE
+ $rd read
+ $rd AUTH psuser pspass
+ $rd read
+ $rd SUBSCRIBE foo
+ catch {$rd read} e
+ $rd close
+ set e
+ } {*NOPERM*channel*}
+
+ test {By default, only default user is able to subscribe to any shard channel} {
+ set rd [redis_deferring_client]
+ $rd AUTH default pwd
+ $rd read
+ $rd SSUBSCRIBE foo
+ assert_match {ssubscribe foo 1} [$rd read]
+ $rd SUNSUBSCRIBE
+ $rd read
+ $rd AUTH psuser pspass
+ $rd read
+ $rd SSUBSCRIBE foo
+ catch {$rd read} e
+ $rd close
+ set e
+ } {*NOPERM*channel*}
+
+ test {By default, only default user is able to subscribe to any pattern} {
+ set rd [redis_deferring_client]
+ $rd AUTH default pwd
+ $rd read
+ $rd PSUBSCRIBE bar*
+ assert_match {psubscribe bar\* 1} [$rd read]
+ $rd PUNSUBSCRIBE
+ $rd read
+ $rd AUTH psuser pspass
+ $rd read
+ $rd PSUBSCRIBE bar*
+ catch {$rd read} e
+ $rd close
+ set e
+ } {*NOPERM*channel*}
+
+ test {It's possible to allow publishing to a subset of channels} {
+ r ACL setuser psuser resetchannels &foo:1 &bar:*
+ assert_equal {0} [r PUBLISH foo:1 somemessage]
+ assert_equal {0} [r PUBLISH bar:2 anothermessage]
+ catch {r PUBLISH zap:3 nosuchmessage} e
+ set e
+ } {*NOPERM*channel*}
+
+ test {It's possible to allow publishing to a subset of shard channels} {
+ r ACL setuser psuser resetchannels &foo:1 &bar:*
+ assert_equal {0} [r SPUBLISH foo:1 somemessage]
+ assert_equal {0} [r SPUBLISH bar:2 anothermessage]
+ catch {r SPUBLISH zap:3 nosuchmessage} e
+ set e
+ } {*NOPERM*channel*}
+
+    test {Validate that a subset of channels is prefixed with the resetchannels flag} {
+ r ACL setuser hpuser on nopass resetchannels &foo +@all
+
+        # Verify the resetchannels flag appears before the channel name(s)
+ set users [r ACL LIST]
+ set curruser "hpuser"
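+        # Shuffle so the assertion does not depend on ACL LIST ordering.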
+ foreach user [lshuffle $users] {
+ if {[string first $curruser $user] != -1} {
+ assert_equal {user hpuser on nopass sanitize-payload resetchannels &foo +@all} $user
+ }
+ }
+
+        # Authenticate as hpuser (any password works, since it's nopass)
+ r AUTH hpuser pass
+
+ assert_equal {0} [r PUBLISH foo bar]
+ catch {r PUBLISH bar game} e
+
+        # Fall back to psuser for the tests below
+ r AUTH psuser pspass
+ r ACL deluser hpuser
+ set e
+ } {*NOPERM*channel*}
+
+    test {In a transaction, queued publish/subscribe/psubscribe to an unauthorized channel will fail} {
+ r ACL setuser psuser +multi +discard
+ r MULTI
+ assert_error {*NOPERM*channel*} {r PUBLISH notexits helloworld}
+ r DISCARD
+ r MULTI
+ assert_error {*NOPERM*channel*} {r SUBSCRIBE notexits foo:1}
+ r DISCARD
+ r MULTI
+ assert_error {*NOPERM*channel*} {r PSUBSCRIBE notexits:* bar:*}
+ r DISCARD
+ }
+
+ test {It's possible to allow subscribing to a subset of channels} {
+ set rd [redis_deferring_client]
+ $rd AUTH psuser pspass
+ $rd read
+ $rd SUBSCRIBE foo:1
+ assert_match {subscribe foo:1 1} [$rd read]
+ $rd SUBSCRIBE bar:2
+ assert_match {subscribe bar:2 2} [$rd read]
+ $rd SUBSCRIBE zap:3
+ catch {$rd read} e
+ set e
+ } {*NOPERM*channel*}
+
+ test {It's possible to allow subscribing to a subset of shard channels} {
+ set rd [redis_deferring_client]
+ $rd AUTH psuser pspass
+ $rd read
+ $rd SSUBSCRIBE foo:1
+ assert_match {ssubscribe foo:1 1} [$rd read]
+ $rd SSUBSCRIBE bar:2
+ assert_match {ssubscribe bar:2 2} [$rd read]
+ $rd SSUBSCRIBE zap:3
+ catch {$rd read} e
+ set e
+ } {*NOPERM*channel*}
+
+ test {It's possible to allow subscribing to a subset of channel patterns} {
+ set rd [redis_deferring_client]
+ $rd AUTH psuser pspass
+ $rd read
+ $rd PSUBSCRIBE foo:1
+ assert_match {psubscribe foo:1 1} [$rd read]
+ $rd PSUBSCRIBE bar:*
+ assert_match {psubscribe bar:\* 2} [$rd read]
+ $rd PSUBSCRIBE bar:baz
+ catch {$rd read} e
+ set e
+ } {*NOPERM*channel*}
+
+ test {Subscribers are killed when revoked of channel permission} {
+ set rd [redis_deferring_client]
+ r ACL setuser psuser resetchannels &foo:1
+ $rd AUTH psuser pspass
+ $rd read
+ $rd CLIENT SETNAME deathrow
+ $rd read
+ $rd SUBSCRIBE foo:1
+ $rd read
+ r ACL setuser psuser resetchannels
+ assert_no_match {*deathrow*} [r CLIENT LIST]
+ $rd close
+ } {0}
+
+    test {Subscribers are killed when revoked of shard channel permission} {
+ set rd [redis_deferring_client]
+ r ACL setuser psuser resetchannels &foo:1
+ $rd AUTH psuser pspass
+ $rd read
+ $rd CLIENT SETNAME deathrow
+ $rd read
+ $rd SSUBSCRIBE foo:1
+ $rd read
+ r ACL setuser psuser resetchannels
+ assert_no_match {*deathrow*} [r CLIENT LIST]
+ $rd close
+ } {0}
+
+ test {Subscribers are killed when revoked of pattern permission} {
+ set rd [redis_deferring_client]
+ r ACL setuser psuser resetchannels &bar:*
+ $rd AUTH psuser pspass
+ $rd read
+ $rd CLIENT SETNAME deathrow
+ $rd read
+ $rd PSUBSCRIBE bar:*
+ $rd read
+ r ACL setuser psuser resetchannels
+ assert_no_match {*deathrow*} [r CLIENT LIST]
+ $rd close
+ } {0}
+
+ test {Subscribers are killed when revoked of allchannels permission} {
+ set rd [redis_deferring_client]
+ r ACL setuser psuser allchannels
+ $rd AUTH psuser pspass
+ $rd read
+ $rd CLIENT SETNAME deathrow
+ $rd read
+ $rd PSUBSCRIBE foo
+ $rd read
+ r ACL setuser psuser resetchannels
+ assert_no_match {*deathrow*} [r CLIENT LIST]
+ $rd close
+ } {0}
+
+    test {Subscribers are pardoned if literal permissions are retained and/or allchannels is gained} {
+ set rd [redis_deferring_client]
+ r ACL setuser psuser resetchannels &foo:1 &bar:* &orders
+ $rd AUTH psuser pspass
+ $rd read
+ $rd CLIENT SETNAME pardoned
+ $rd read
+ $rd SUBSCRIBE foo:1
+ $rd read
+ $rd SSUBSCRIBE orders
+ $rd read
+ $rd PSUBSCRIBE bar:*
+ $rd read
+ r ACL setuser psuser resetchannels &foo:1 &bar:* &orders &baz:qaz &zoo:*
+ assert_match {*pardoned*} [r CLIENT LIST]
+ r ACL setuser psuser allchannels
+ assert_match {*pardoned*} [r CLIENT LIST]
+ $rd close
+ } {0}
+
+ test {blocked command gets rejected when reprocessed after permission change} {
+ r auth default ""
+ r config resetstat
+ set rd [redis_deferring_client]
+ r ACL setuser psuser reset on nopass +@all allkeys
+ $rd AUTH psuser pspass
+ $rd read
+ $rd BLPOP list1 0
+ wait_for_blocked_client
+ r ACL setuser psuser resetkeys
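+        # The push wakes the blocked client, whose command is then
+        # re-checked against the now-restricted ACL rules.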
+ r LPUSH list1 foo
+ assert_error {*NOPERM No permissions to access a key*} {$rd read}
+ $rd ping
+ $rd close
+ assert_match {*calls=0,usec=0,*,rejected_calls=1,failed_calls=0} [cmdrstat blpop r]
+ }
+
+ test {Users can be configured to authenticate with any password} {
+ r ACL setuser newuser nopass
+ r AUTH newuser zipzapblabla
+ } {OK}
+
+ test {ACLs can exclude single commands} {
+ r ACL setuser newuser -ping
+ r INCR mycounter ; # Should not raise an error
+ catch {r PING} e
+ set e
+ } {*NOPERM*ping*}
+
+ test {ACLs can include or exclude whole classes of commands} {
+ r ACL setuser newuser -@all +@set +acl
+ r SADD myset a b c; # Should not raise an error
+ r ACL setuser newuser +@all -@string
+ r SADD myset a b c; # Again should not raise an error
+ # String commands instead should raise an error
+ catch {r SET foo bar} e
+ r ACL setuser newuser allcommands; # Undo commands ACL
+ set e
+ } {*NOPERM*set*}
+
+ test {ACLs can include single subcommands} {
+ r ACL setuser newuser +@all -client
+ r ACL setuser newuser +client|id +client|setname
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_match {+@all*-client*+client|id*} $cmdstr
+ assert_match {+@all*-client*+client|setname*} $cmdstr
+ r CLIENT ID; # Should not fail
+ r CLIENT SETNAME foo ; # Should not fail
+ catch {r CLIENT KILL type master} e
+ set e
+ } {*NOPERM*client|kill*}
+
+ test {ACLs can exclude single subcommands, case 1} {
+ r ACL setuser newuser +@all -client|kill
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_equal {+@all -client|kill} $cmdstr
+ r CLIENT ID; # Should not fail
+ r CLIENT SETNAME foo ; # Should not fail
+ catch {r CLIENT KILL type master} e
+ set e
+ } {*NOPERM*client|kill*}
+
+ test {ACLs can exclude single subcommands, case 2} {
+ r ACL setuser newuser -@all +acl +config -config|set
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_match {*+config*} $cmdstr
+ assert_match {*-config|set*} $cmdstr
+ r CONFIG GET loglevel; # Should not fail
+ catch {r CONFIG SET loglevel debug} e
+ set e
+ } {*NOPERM*config|set*}
+
+ test {ACLs cannot include a subcommand with a specific arg} {
+ r ACL setuser newuser +@all -config|get
+ catch { r ACL setuser newuser +config|get|appendonly} e
+ set e
+ } {*Allowing first-arg of a subcommand is not supported*}
+
+    test {ACLs cannot exclude or include a container command with a specific arg} {
+ r ACL setuser newuser +@all +config|get
+ catch { r ACL setuser newuser +@all +config|asdf} e
+ assert_match "*Unknown command or category name in ACL*" $e
+ catch { r ACL setuser newuser +@all -config|asdf} e
+ assert_match "*Unknown command or category name in ACL*" $e
+ } {}
+
+ test {ACLs cannot exclude or include a container command with two args} {
+ r ACL setuser newuser +@all +config|get
+ catch { r ACL setuser newuser +@all +get|key1|key2} e
+ assert_match "*Unknown command or category name in ACL*" $e
+ catch { r ACL setuser newuser +@all -get|key1|key2} e
+ assert_match "*Unknown command or category name in ACL*" $e
+ } {}
+
+    test {ACLs including a category also include its subcommands} {
+ r ACL setuser newuser -@all +del +acl +@stream
+ r DEL key
+ r XADD key * field value
+ r XINFO STREAM key
+ }
+
+ test {ACLs can block SELECT of all but a specific DB} {
+ r ACL setuser newuser -@all +acl +select|0
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_match {*+select|0*} $cmdstr
+ r SELECT 0
+ catch {r SELECT 1} e
+ set e
+ } {*NOPERM*select*} {singledb:skip}
+
+ test {ACLs can block all DEBUG subcommands except one} {
+ r ACL setuser newuser -@all +acl +del +incr +debug|object
+ r DEL key
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_match {*+debug|object*} $cmdstr
+ r INCR key
+ r DEBUG OBJECT key
+ catch {r DEBUG SEGFAULT} e
+ set e
+ } {*NOPERM*debug*}
+
+    test {ACLs can include subcommands, which are engulfed if the full command is later added} {
+ r ACL setuser bob +memory|doctor
+ set cmdstr [dict get [r ACL getuser bob] commands]
+ assert_equal {-@all +memory|doctor} $cmdstr
+
+        # Validate that the subcommand has been engulfed by +memory.
+ r ACL setuser bob +memory
+ set cmdstr [dict get [r ACL getuser bob] commands]
+ assert_equal {-@all +memory} $cmdstr
+
+ # Appending to the existing access string of bob.
+ r ACL setuser bob +@all +client|id
+        # Although this does nothing, we retain it anyway so we can reproduce
+        # the original ACL.
+ set cmdstr [dict get [r ACL getuser bob] commands]
+ assert_equal {+@all +client|id} $cmdstr
+
+ r ACL setuser bob >passwd1 on
+ r AUTH bob passwd1
+ r CLIENT ID; # Should not fail
+ r MEMORY DOCTOR; # Should not fail
+ }
+
+    test {ACLs can exclude subcommands, which are engulfed if the full command is later excluded} {
+ r ACL setuser alice +@all -memory|doctor
+ set cmdstr [dict get [r ACL getuser alice] commands]
+ assert_equal {+@all -memory|doctor} $cmdstr
+
+ r ACL setuser alice >passwd1 on
+ r AUTH alice passwd1
+
+ assert_error {*NOPERM*memory|doctor*} {r MEMORY DOCTOR}
+ r MEMORY STATS ;# should work
+
+        # Validate that the subcommand has been engulfed by -memory.
+ r ACL setuser alice +@all -memory
+ set cmdstr [dict get [r ACL getuser alice] commands]
+ assert_equal {+@all -memory} $cmdstr
+
+ assert_error {*NOPERM*memory|doctor*} {r MEMORY DOCTOR}
+ assert_error {*NOPERM*memory|stats*} {r MEMORY STATS}
+
+ # Appending to the existing access string of alice.
+ r ACL setuser alice -@all
+
+ # Now, alice can't do anything, we need to auth newuser to execute ACL GETUSER
+ r AUTH newuser passwd1
+
+        # Validate that the new commands have been engulfed by -@all.
+ set cmdstr [dict get [r ACL getuser alice] commands]
+ assert_equal {-@all} $cmdstr
+
+ r AUTH alice passwd1
+
+ assert_error {*NOPERM*get*} {r GET key}
+ assert_error {*NOPERM*memory|stats*} {r MEMORY STATS}
+
+ # Auth newuser before the next test
+ r AUTH newuser passwd1
+ }
+
+    test {ACL SETUSER RESET reverts a user to the default state of a newly created user} {
+ set current_user "example"
+ r ACL DELUSER $current_user
+ r ACL SETUSER $current_user
+
+ set users [r ACL LIST]
+ foreach user [lshuffle $users] {
+ if {[string first $current_user $user] != -1} {
+ set current_user_output $user
+ }
+ }
+
+ r ACL SETUSER $current_user reset
+ set users [r ACL LIST]
+ foreach user [lshuffle $users] {
+ if {[string first $current_user $user] != -1} {
+ assert_equal $current_user_output $user
+ }
+ }
+ }
+
+    # Note that the order of the generated ACL rules is not stable in Redis,
+    # so we need to match the individual parts rather than the whole string.
+ test {ACL GETUSER is able to translate back command permissions} {
+ # Subtractive
+ r ACL setuser newuser reset +@all ~* -@string +incr -debug +debug|digest
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_match {*+@all*} $cmdstr
+ assert_match {*-@string*} $cmdstr
+ assert_match {*+incr*} $cmdstr
+        assert_match {*-debug +debug|digest*} $cmdstr
+
+ # Additive
+ r ACL setuser newuser reset +@string -incr +acl +debug|digest +debug|segfault
+ set cmdstr [dict get [r ACL getuser newuser] commands]
+ assert_match {*-@all*} $cmdstr
+ assert_match {*+@string*} $cmdstr
+ assert_match {*-incr*} $cmdstr
+ assert_match {*+debug|digest*} $cmdstr
+ assert_match {*+debug|segfault*} $cmdstr
+ assert_match {*+acl*} $cmdstr
+ }
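+
+    # A minimal helper sketch (illustrative, not used by the tests above)
+    # for asserting a set of rules regardless of their order; the proc
+    # name and the `rules` argument are assumptions:
+    proc assert_rules_present {cmdstr rules} {
+        foreach rule $rules {
+            # assert_match comes from the test framework's util helpers.
+            assert_match "*$rule*" $cmdstr
+        }
+    }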
+
+    # A regression test to make sure that, as long as there is a simple
+    # category defining the commands, it will be used as is.
+ test {ACL GETUSER provides reasonable results} {
+ set categories [r ACL CAT]
+
+ # Test that adding each single category will
+ # result in just that category with both +@all and -@all
+ foreach category $categories {
+ # Test for future commands where allowed
+ r ACL setuser additive reset +@all "-@$category"
+ set cmdstr [dict get [r ACL getuser additive] commands]
+ assert_equal "+@all -@$category" $cmdstr
+
+ # Test for future commands where disallowed
+ r ACL setuser restrictive reset -@all "+@$category"
+ set cmdstr [dict get [r ACL getuser restrictive] commands]
+ assert_equal "-@all +@$category" $cmdstr
+ }
+ }
+
+ # Test that only lossless compaction of ACLs occur.
+ test {ACL GETUSER provides correct results} {
+ r ACL SETUSER adv-test
+ r ACL SETUSER adv-test +@all -@hash -@slow +hget
+ assert_equal "+@all -@hash -@slow +hget" [dict get [r ACL getuser adv-test] commands]
+
+ # Categories are re-ordered if re-added
+ r ACL SETUSER adv-test -@hash
+ assert_equal "+@all -@slow +hget -@hash" [dict get [r ACL getuser adv-test] commands]
+
+ # Inverting categories removes existing categories
+ r ACL SETUSER adv-test +@hash
+ assert_equal "+@all -@slow +hget +@hash" [dict get [r ACL getuser adv-test] commands]
+
+ # Inverting the all category compacts everything
+ r ACL SETUSER adv-test -@all
+ assert_equal "-@all" [dict get [r ACL getuser adv-test] commands]
+ r ACL SETUSER adv-test -@string -@slow +@all
+ assert_equal "+@all" [dict get [r ACL getuser adv-test] commands]
+
+ # Make sure categories are case insensitive
+ r ACL SETUSER adv-test -@all +@HASH +@hash +@HaSh
+ assert_equal "-@all +@hash" [dict get [r ACL getuser adv-test] commands]
+
+ # Make sure commands are case insensitive
+ r ACL SETUSER adv-test -@all +HGET +hget +hGeT
+ assert_equal "-@all +hget" [dict get [r ACL getuser adv-test] commands]
+
+ # Arbitrary category additions and removals are handled
+ r ACL SETUSER adv-test -@all +@hash +@slow +@set +@set +@slow +@hash
+ assert_equal "-@all +@set +@slow +@hash" [dict get [r ACL getuser adv-test] commands]
+
+ # Arbitrary command additions and removals are handled
+ r ACL SETUSER adv-test -@all +hget -hset +hset -hget
+ assert_equal "-@all +hset -hget" [dict get [r ACL getuser adv-test] commands]
+
+ # Arbitrary subcommands are compacted
+ r ACL SETUSER adv-test -@all +client|list +client|list +config|get +config +acl|list -acl
+ assert_equal "-@all +client|list +config -acl" [dict get [r ACL getuser adv-test] commands]
+
+ # Deprecated subcommand usage is handled
+ r ACL SETUSER adv-test -@all +select|0 +select|0 +debug|segfault +debug
+ assert_equal "-@all +select|0 +debug" [dict get [r ACL getuser adv-test] commands]
+
+        # Unnecessary categories are retained for potential future compatibility
+ r ACL SETUSER adv-test -@all -@dangerous
+ assert_equal "-@all -@dangerous" [dict get [r ACL getuser adv-test] commands]
+
+ # Duplicate categories are compressed, regression test for #12470
+ r ACL SETUSER adv-test -@all +config +config|get -config|set +config
+ assert_equal "-@all +config" [dict get [r ACL getuser adv-test] commands]
+ }
+
+ test "ACL CAT with illegal arguments" {
+ assert_error {*Unknown category 'NON_EXISTS'} {r ACL CAT NON_EXISTS}
+ assert_error {*unknown subcommand or wrong number of arguments for 'CAT'*} {r ACL CAT NON_EXISTS NON_EXISTS2}
+ }
+
+ test "ACL CAT without category - list all categories" {
+ set categories [r acl cat]
+ assert_not_equal [lsearch $categories "keyspace"] -1
+ assert_not_equal [lsearch $categories "connection"] -1
+ }
+
+ test "ACL CAT category - list all commands/subcommands that belong to category" {
+ assert_not_equal [lsearch [r acl cat transaction] "multi"] -1
+ assert_not_equal [lsearch [r acl cat scripting] "function|list"] -1
+
+ # Negative check to make sure it doesn't actually return all commands.
+ assert_equal [lsearch [r acl cat keyspace] "set"] -1
+ assert_equal [lsearch [r acl cat stream] "get"] -1
+ }
+
+ test "ACL requires explicit permission for scripting for EVAL_RO, EVALSHA_RO and FCALL_RO" {
+ r ACL SETUSER scripter on nopass +readonly
+ assert_match {*has no permissions to run the 'eval_ro' command*} [r ACL DRYRUN scripter EVAL_RO "" 0]
+ assert_match {*has no permissions to run the 'evalsha_ro' command*} [r ACL DRYRUN scripter EVALSHA_RO "" 0]
+ assert_match {*has no permissions to run the 'fcall_ro' command*} [r ACL DRYRUN scripter FCALL_RO "" 0]
+ }
+
+ test {ACL #5998 regression: memory leaks adding / removing subcommands} {
+ r AUTH default ""
+ r ACL setuser newuser reset -debug +debug|a +debug|b +debug|c
+ r ACL setuser newuser -debug
+ # The test framework will detect a leak if any.
+ }
+
+ test {ACL LOG aggregates similar errors together and assigns unique entry-id to new errors} {
+ r ACL LOG RESET
+ r ACL setuser user1 >foo
+ assert_error "*WRONGPASS*" {r AUTH user1 doo}
+ set entry_id_initial_error [dict get [lindex [r ACL LOG] 0] entry-id]
+ set timestamp_created_original [dict get [lindex [r ACL LOG] 0] timestamp-created]
+ set timestamp_last_update_original [dict get [lindex [r ACL LOG] 0] timestamp-last-updated]
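+        # Sleep so that timestamp-last-updated can strictly advance when
+        # the same error repeats.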
+ after 1
+ for {set j 0} {$j < 10} {incr j} {
+ assert_error "*WRONGPASS*" {r AUTH user1 doo}
+ }
+        set entry_id_latest_error [dict get [lindex [r ACL LOG] 0] entry-id]
+        set timestamp_created_updated [dict get [lindex [r ACL LOG] 0] timestamp-created]
+        set timestamp_last_updated_after_update [dict get [lindex [r ACL LOG] 0] timestamp-last-updated]
+        assert {$entry_id_latest_error eq $entry_id_initial_error}
+ assert {$timestamp_last_update_original < $timestamp_last_updated_after_update}
+ assert {$timestamp_created_original eq $timestamp_created_updated}
+ r ACL setuser user2 >doo
+ assert_error "*WRONGPASS*" {r AUTH user2 foo}
+ set new_error_entry_id [dict get [lindex [r ACL LOG] 0] entry-id]
+        assert {$new_error_entry_id eq $entry_id_latest_error + 1}
+ }
+
+ test {ACL LOG shows failed command executions at toplevel} {
+ r ACL LOG RESET
+ r ACL setuser antirez >foo on +set ~object:1234
+ r ACL setuser antirez +eval +multi +exec
+ r ACL setuser antirez resetchannels +publish
+ r AUTH antirez foo
+ assert_error "*NOPERM*get*" {r GET foo}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry username] eq {antirez}}
+ assert {[dict get $entry context] eq {toplevel}}
+ assert {[dict get $entry reason] eq {command}}
+ assert {[dict get $entry object] eq {get}}
+ assert_match {*cmd=get*} [dict get $entry client-info]
+ }
+
+ test "ACL LOG shows failed subcommand executions at toplevel" {
+ r ACL LOG RESET
+ r ACL DELUSER demo
+ r ACL SETUSER demo on nopass
+ r AUTH demo ""
+ assert_error "*NOPERM*script|help*" {r SCRIPT HELP}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] {demo}
+ assert_equal [dict get $entry context] {toplevel}
+ assert_equal [dict get $entry reason] {command}
+ assert_equal [dict get $entry object] {script|help}
+ }
+
+ test {ACL LOG is able to test similar events} {
+ r ACL LOG RESET
+ r AUTH antirez foo
+ catch {r GET foo}
+ catch {r GET foo}
+ catch {r GET foo}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry count] == 3}
+ }
+
+ test {ACL LOG is able to log keys access violations and key name} {
+ r AUTH antirez foo
+ catch {r SET somekeynotallowed 1234}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry reason] eq {key}}
+ assert {[dict get $entry object] eq {somekeynotallowed}}
+ }
+
+ test {ACL LOG is able to log channel access violations and channel name} {
+ r AUTH antirez foo
+ catch {r PUBLISH somechannelnotallowed nullmsg}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry reason] eq {channel}}
+ assert {[dict get $entry object] eq {somechannelnotallowed}}
+ }
+
+ test {ACL LOG RESET is able to flush the entries in the log} {
+ r ACL LOG RESET
+ assert {[llength [r ACL LOG]] == 0}
+ }
+
+ test {ACL LOG can distinguish the transaction context (1)} {
+ r AUTH antirez foo
+ r MULTI
+ catch {r INCR foo}
+ catch {r EXEC}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry context] eq {multi}}
+ assert {[dict get $entry object] eq {incr}}
+ }
+
+ test {ACL LOG can distinguish the transaction context (2)} {
+ set rd1 [redis_deferring_client]
+ r ACL SETUSER antirez +incr
+
+ r AUTH antirez foo
+ r MULTI
+ r INCR object:1234
+ $rd1 ACL SETUSER antirez -incr
+ $rd1 read
+ catch {r EXEC}
+ $rd1 close
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry context] eq {multi}}
+ assert {[dict get $entry object] eq {incr}}
+ assert_match {*cmd=exec*} [dict get $entry client-info]
+ r ACL SETUSER antirez -incr
+ }
+
+ test {ACL can log errors in the context of Lua scripting} {
+ r AUTH antirez foo
+ catch {r EVAL {redis.call('incr','foo')} 0}
+ r AUTH default ""
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry context] eq {lua}}
+ assert {[dict get $entry object] eq {incr}}
+ assert_match {*cmd=eval*} [dict get $entry client-info]
+ }
+
+    test {ACL LOG can accept a numerical argument to show fewer entries} {
+ r AUTH antirez foo
+ catch {r INCR foo}
+ catch {r INCR foo}
+ catch {r INCR foo}
+ catch {r INCR foo}
+ r AUTH default ""
+ assert {[llength [r ACL LOG]] > 1}
+ assert {[llength [r ACL LOG 2]] == 2}
+ }
+
+ test {ACL LOG can log failed auth attempts} {
+ catch {r AUTH antirez wrong-password}
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry context] eq {toplevel}}
+ assert {[dict get $entry reason] eq {auth}}
+ assert {[dict get $entry object] eq {AUTH}}
+ assert {[dict get $entry username] eq {antirez}}
+ }
+
+ test {ACL LOG entries are limited to a maximum amount} {
+ r ACL LOG RESET
+ r CONFIG SET acllog-max-len 5
+ r AUTH antirez foo
+ for {set j 0} {$j < 10} {incr j} {
+ catch {r SET obj:$j 123}
+ }
+ r AUTH default ""
+ assert {[llength [r ACL LOG]] == 5}
+ }
+
+ test {When default user is off, new connections are not authenticated} {
+ r ACL setuser default off
+ catch {set rd1 [redis_deferring_client]} e
+ r ACL setuser default on
+ set e
+ } {*NOAUTH*}
+
+ test {When default user has no command permission, hello command still works for other users} {
+ r ACL setuser secure-user >supass on +@all
+ r ACL setuser default -@all
+ r HELLO 2 AUTH secure-user supass
+ r ACL setuser default nopass +@all
+ r AUTH default ""
+ }
+
+ test {When an authentication chain is used in the HELLO cmd, the last auth cmd has precedence} {
+ r ACL setuser secure-user1 >supass on +@all
+ r ACL setuser secure-user2 >supass on +@all
+ r HELLO 2 AUTH secure-user pass AUTH secure-user2 supass AUTH secure-user1 supass
+ assert {[r ACL whoami] eq {secure-user1}}
+ catch {r HELLO 2 AUTH secure-user supass AUTH secure-user2 supass AUTH secure-user pass} e
+ assert_match "WRONGPASS invalid username-password pair or user is disabled." $e
+ assert {[r ACL whoami] eq {secure-user1}}
+ }
+
+ test {When a setname chain is used in the HELLO cmd, the last setname cmd has precedence} {
+ r HELLO 2 setname client1 setname client2 setname client3 setname client4
+ assert {[r client getname] eq {client4}}
+ catch {r HELLO 2 setname client5 setname client6 setname "client name"} e
+ assert_match "ERR Client names cannot contain spaces, newlines or special characters." $e
+ assert {[r client getname] eq {client4}}
+ }
+
+ test {When authentication fails in the HELLO cmd, the client setname should not be applied} {
+ r client setname client0
+ catch {r HELLO 2 AUTH user pass setname client1} e
+ assert_match "WRONGPASS invalid username-password pair or user is disabled." $e
+ assert {[r client getname] eq {client0}}
+ }
+
+ test {ACL HELP should not have unexpected options} {
+ catch {r ACL help xxx} e
+ assert_match "*wrong number of arguments for 'acl|help' command" $e
+ }
+
+ test {Delete a user that the client doesn't use} {
+ r ACL setuser not_used on >passwd
+ assert {[r ACL deluser not_used] == 1}
+ # The client is not closed
+ assert {[r ping] eq {PONG}}
+ }
+
+ test {Delete a user that the client is using} {
+ r ACL setuser using on +acl >passwd
+ r AUTH using passwd
+ # The client will receive reply normally
+ assert {[r ACL deluser using] == 1}
+ # The client is closed
+ catch {[r ping]} e
+ assert_match "*I/O error*" $e
+ }
+
+    test {ACL GENPASS with invalid arguments fails} {
+ catch {r ACL genpass -236} err1
+ catch {r ACL genpass 5000} err2
+ assert_match "*ACL GENPASS argument must be the number*" $err1
+ assert_match "*ACL GENPASS argument must be the number*" $err2
+ }
+
+ test {Default user can not be removed} {
+ catch {r ACL deluser default} err
+ set err
+ } {ERR The 'default' user cannot be removed}
+
+ test {ACL load non-existing configured ACL file} {
+ catch {r ACL load} err
+ set err
+ } {*Redis instance is not configured to use an ACL file*}
+
+    # If there is an AUTH failure, the metric increases
+ test {ACL-Metrics user AUTH failure} {
+ set current_auth_failures [s acl_access_denied_auth]
+ set current_invalid_cmd_accesses [s acl_access_denied_cmd]
+ set current_invalid_key_accesses [s acl_access_denied_key]
+ set current_invalid_channel_accesses [s acl_access_denied_channel]
+ assert_error "*WRONGPASS*" {r AUTH notrealuser 1233456}
+ assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 1]}
+ assert_error "*WRONGPASS*" {r HELLO 3 AUTH notrealuser 1233456}
+ assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 2]}
+ assert_error "*WRONGPASS*" {r HELLO 2 AUTH notrealuser 1233456}
+ assert {[s acl_access_denied_auth] eq [expr $current_auth_failures + 3]}
+ assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses}
+ assert {[s acl_access_denied_key] eq $current_invalid_key_accesses}
+ assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses}
+ }
+
+    # If a user tries to access an unauthorized command, the metric increases
+ test {ACL-Metrics invalid command accesses} {
+ set current_auth_failures [s acl_access_denied_auth]
+ set current_invalid_cmd_accesses [s acl_access_denied_cmd]
+ set current_invalid_key_accesses [s acl_access_denied_key]
+ set current_invalid_channel_accesses [s acl_access_denied_channel]
+ r ACL setuser invalidcmduser on >passwd nocommands
+ r AUTH invalidcmduser passwd
+ assert_error "*no permissions to run the * command*" {r acl list}
+ r AUTH default ""
+ assert {[s acl_access_denied_auth] eq $current_auth_failures}
+ assert {[s acl_access_denied_cmd] eq [expr $current_invalid_cmd_accesses + 1]}
+ assert {[s acl_access_denied_key] eq $current_invalid_key_accesses}
+ assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses}
+ }
+
+    # If a user tries to access an unauthorized key, the metric increases
+ test {ACL-Metrics invalid key accesses} {
+ set current_auth_failures [s acl_access_denied_auth]
+ set current_invalid_cmd_accesses [s acl_access_denied_cmd]
+ set current_invalid_key_accesses [s acl_access_denied_key]
+ set current_invalid_channel_accesses [s acl_access_denied_channel]
+ r ACL setuser invalidkeyuser on >passwd resetkeys allcommands
+ r AUTH invalidkeyuser passwd
+ assert_error "*NOPERM*key*" {r get x}
+ r AUTH default ""
+ assert {[s acl_access_denied_auth] eq $current_auth_failures}
+ assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses}
+ assert {[s acl_access_denied_key] eq [expr $current_invalid_key_accesses + 1]}
+ assert {[s acl_access_denied_channel] eq $current_invalid_channel_accesses}
+ }
+
+    # If a user tries to access an unauthorized channel, the metric increases
+    test {ACL-Metrics invalid channel accesses} {
+ set current_auth_failures [s acl_access_denied_auth]
+ set current_invalid_cmd_accesses [s acl_access_denied_cmd]
+ set current_invalid_key_accesses [s acl_access_denied_key]
+ set current_invalid_channel_accesses [s acl_access_denied_channel]
+ r ACL setuser invalidchanneluser on >passwd resetchannels allcommands
+        r AUTH invalidchanneluser passwd
+ assert_error "*NOPERM*channel*" {r subscribe x}
+ r AUTH default ""
+ assert {[s acl_access_denied_auth] eq $current_auth_failures}
+ assert {[s acl_access_denied_cmd] eq $current_invalid_cmd_accesses}
+ assert {[s acl_access_denied_key] eq $current_invalid_key_accesses}
+ assert {[s acl_access_denied_channel] eq [expr $current_invalid_channel_accesses + 1]}
+ }
+}
+
+set server_path [tmpdir "server.acl"]
+exec cp -f tests/assets/user.acl $server_path
+start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "allchannels" "aclfile" "user.acl"] tags [list "external:skip"]] {
+ # user alice on allcommands allkeys &* >alice
+ # user bob on -@all +@set +acl ~set* &* >bob
+ # user default on nopass ~* &* +@all
+
+ test {default: load from include file, can access any channels} {
+ r SUBSCRIBE foo
+ r PSUBSCRIBE bar*
+ r UNSUBSCRIBE
+ r PUNSUBSCRIBE
+ r PUBLISH hello world
+ }
+
+ test {default: with config acl-pubsub-default allchannels after reset, can access any channels} {
+ r ACL setuser default reset on nopass ~* +@all
+ r SUBSCRIBE foo
+ r PSUBSCRIBE bar*
+ r UNSUBSCRIBE
+ r PUNSUBSCRIBE
+ r PUBLISH hello world
+ }
+
+ test {default: with config acl-pubsub-default resetchannels after reset, can not access any channels} {
+ r CONFIG SET acl-pubsub-default resetchannels
+ r ACL setuser default reset on nopass ~* +@all
+ assert_error {*NOPERM*channel*} {r SUBSCRIBE foo}
+ assert_error {*NOPERM*channel*} {r PSUBSCRIBE bar*}
+ assert_error {*NOPERM*channel*} {r PUBLISH hello world}
+ r CONFIG SET acl-pubsub-default resetchannels
+ }
+
+    test {Alice: can execute all commands} {
+ r AUTH alice alice
+ assert_equal "alice" [r acl whoami]
+ r SET key value
+ }
+
+    test {Bob: can only execute @set and acl commands} {
+ r AUTH bob bob
+ assert_equal "bob" [r acl whoami]
+ assert_equal "3" [r sadd set 1 2 3]
+ catch {r SET key value} e
+ set e
+ } {*NOPERM*set*}
+
+ test {ACL load and save} {
+ r ACL setuser eve +get allkeys >eve on
+ r ACL save
+
+        # ACL load will free users and kill clients
+ r ACL load
+ catch {r ACL LIST} e
+ assert_match {*I/O error*} $e
+
+ reconnect
+ r AUTH alice alice
+ r SET key value
+ r AUTH eve eve
+ r GET key
+ catch {r SET key value} e
+ set e
+ } {*NOPERM*set*}
+
+ test {ACL load and save with restricted channels} {
+ r AUTH alice alice
+ r ACL setuser harry on nopass resetchannels &test +@all ~*
+ r ACL save
+
+        # ACL load will free users and kill clients
+ r ACL load
+ catch {r ACL LIST} e
+ assert_match {*I/O error*} $e
+
+ reconnect
+ r AUTH harry anything
+ r publish test bar
+ catch {r publish test1 bar} e
+ r ACL deluser harry
+ set e
+ } {*NOPERM*channel*}
+}
+
+set server_path [tmpdir "resetchannels.acl"]
+exec cp -f tests/assets/nodefaultuser.acl $server_path
+exec cp -f tests/assets/default.conf $server_path
+start_server [list overrides [list "dir" $server_path "aclfile" "nodefaultuser.acl"] tags [list "external:skip"]] {
+
+ test {Default user has access to all channels irrespective of flag} {
+ set channelinfo [dict get [r ACL getuser default] channels]
+ assert_equal "&*" $channelinfo
+ set channelinfo [dict get [r ACL getuser alice] channels]
+ assert_equal "" $channelinfo
+ }
+
+    test {Updating acl-pubsub-default should not affect existing users} {
+ set channelinfo [dict get [r ACL getuser default] channels]
+ assert_equal "&*" $channelinfo
+ r CONFIG set acl-pubsub-default allchannels
+ r ACL setuser mydefault
+ set channelinfo [dict get [r ACL getuser mydefault] channels]
+ assert_equal "&*" $channelinfo
+ r CONFIG set acl-pubsub-default resetchannels
+ set channelinfo [dict get [r ACL getuser mydefault] channels]
+ assert_equal "&*" $channelinfo
+ }
+
+ test {Single channel is valid} {
+ r ACL setuser onechannel &test
+ set channelinfo [dict get [r ACL getuser onechannel] channels]
+ assert_equal "&test" $channelinfo
+ r ACL deluser onechannel
+ }
+
+ test {Single channel is not valid with allchannels} {
+ r CONFIG set acl-pubsub-default allchannels
+ catch {r ACL setuser onechannel &test} err
+ r CONFIG set acl-pubsub-default resetchannels
+ set err
+ } {*start with an empty list of channels*}
+}
+
+set server_path [tmpdir "resetchannels.acl"]
+exec cp -f tests/assets/nodefaultuser.acl $server_path
+exec cp -f tests/assets/default.conf $server_path
+start_server [list overrides [list "dir" $server_path "acl-pubsub-default" "resetchannels" "aclfile" "nodefaultuser.acl"] tags [list "external:skip"]] {
+
+ test {Only default user has access to all channels irrespective of flag} {
+ set channelinfo [dict get [r ACL getuser default] channels]
+ assert_equal "&*" $channelinfo
+ set channelinfo [dict get [r ACL getuser alice] channels]
+ assert_equal "" $channelinfo
+ }
+}
+
+
+start_server {overrides {user "default on nopass ~* +@all"} tags {"external:skip"}} {
+ test {default: load from config file, without channel permission default user can't access any channels} {
+ catch {r SUBSCRIBE foo} e
+ set e
+ } {*NOPERM*channel*}
+}
+
+start_server {overrides {user "default on nopass ~* &* +@all"} tags {"external:skip"}} {
+ test {default: load from config file with all channels permissions} {
+ r SUBSCRIBE foo
+ r PSUBSCRIBE bar*
+ r UNSUBSCRIBE
+ r PUNSUBSCRIBE
+ r PUBLISH hello world
+ }
+}
+
+set server_path [tmpdir "duplicate.acl"]
+exec cp -f tests/assets/user.acl $server_path
+exec cp -f tests/assets/default.conf $server_path
+start_server [list overrides [list "dir" $server_path "aclfile" "user.acl"] tags [list "external:skip"]] {
+
+ test {Test loading an ACL file with duplicate users} {
+ exec cp -f tests/assets/user.acl $server_path
+
+ # Corrupt the ACL file
+ set corruption "\nuser alice on nopass ~* -@all"
+ exec echo $corruption >> $server_path/user.acl
+ catch {r ACL LOAD} err
+ assert_match {*Duplicate user 'alice' found*} $err
+
+ # Verify the previous users still exist
+ # NOTE: A missing user evaluates to an empty
+ # string.
+ assert {[r ACL GETUSER alice] != ""}
+ assert_equal [dict get [r ACL GETUSER alice] commands] "+@all"
+ assert {[r ACL GETUSER bob] != ""}
+ assert {[r ACL GETUSER default] != ""}
+ }
+
+ test {Test loading an ACL file with duplicate default user} {
+ exec cp -f tests/assets/user.acl $server_path
+
+ # Corrupt the ACL file
+ set corruption "\nuser default on nopass ~* -@all"
+ exec echo $corruption >> $server_path/user.acl
+ catch {r ACL LOAD} err
+ assert_match {*Duplicate user 'default' found*} $err
+
+ # Verify the previous users still exist
+ # NOTE: A missing user evaluates to an empty
+ # string.
+ assert {[r ACL GETUSER alice] != ""}
+ assert_equal [dict get [r ACL GETUSER alice] commands] "+@all"
+ assert {[r ACL GETUSER bob] != ""}
+ assert {[r ACL GETUSER default] != ""}
+ }
+
+ test {Test loading duplicate users in config on startup} {
+ catch {exec src/redis-server --user foo --user foo} err
+ assert_match {*Duplicate user*} $err
+
+ catch {exec src/redis-server --user default --user default} err
+ assert_match {*Duplicate user*} $err
+ } {} {external:skip}
+}
+
+start_server {overrides {user "default on nopass ~* +@all -flushdb"} tags {acl external:skip}} {
+ test {ACL from config file and config rewrite} {
+ assert_error {NOPERM *} {r flushdb}
+ r config rewrite
+ restart_server 0 true false
+ assert_error {NOPERM *} {r flushdb}
+ }
+}
+
diff --git a/tests/unit/aofrw.tcl b/tests/unit/aofrw.tcl
new file mode 100644
index 0000000..cc75452
--- /dev/null
+++ b/tests/unit/aofrw.tcl
@@ -0,0 +1,232 @@
+# This unit has the potential to create huge .reqres files, causing log-req-res-validator.py to run for a very long time...
+# Since this unit doesn't do anything worth validating, reply_schema-wise, we decided to skip it
+start_server {tags {"aofrw external:skip logreqres:skip"} overrides {save {}}} {
+ # Enable the AOF
+ r config set appendonly yes
+ r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite.
+ waitForBgrewriteaof r
+
+ foreach rdbpre {yes no} {
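+        # Exercise both AOF formats: with an RDB preamble and as a pure
+        # command stream.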
+ r config set aof-use-rdb-preamble $rdbpre
+ test "AOF rewrite during write load: RDB preamble=$rdbpre" {
+ # Start a write load for 10 seconds
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ set load_handle0 [start_write_load $master_host $master_port 10]
+ set load_handle1 [start_write_load $master_host $master_port 10]
+ set load_handle2 [start_write_load $master_host $master_port 10]
+ set load_handle3 [start_write_load $master_host $master_port 10]
+ set load_handle4 [start_write_load $master_host $master_port 10]
+
+ # Make sure the instance is really receiving data
+ wait_for_condition 50 100 {
+ [r dbsize] > 0
+ } else {
+ fail "No write load detected."
+ }
+
+ # After 3 seconds, start a rewrite, while the write load is still
+ # active.
+ after 3000
+ r bgrewriteaof
+ waitForBgrewriteaof r
+
+ # Let it run a bit more so that we'll append some data to the new
+ # AOF.
+ after 1000
+
+ # Stop the processes generating the load if they are still active
+ stop_write_load $load_handle0
+ stop_write_load $load_handle1
+ stop_write_load $load_handle2
+ stop_write_load $load_handle3
+ stop_write_load $load_handle4
+
+            # Make sure no more commands are processed before taking the debug digest
+ wait_load_handlers_disconnected
+
+ # Get the data set digest
+ set d1 [debug_digest]
+
+ # Load the AOF
+ r debug loadaof
+ set d2 [debug_digest]
+
+ # Make sure they are the same
+ assert {$d1 eq $d2}
+ }
+ }
+}
+
+start_server {tags {"aofrw external:skip"} overrides {aof-use-rdb-preamble no}} {
+ test {Turning off AOF kills the background writing child if any} {
+ r config set appendonly yes
+ waitForBgrewriteaof r
+
+ # start a slow AOFRW
+ r set k v
+ r config set rdb-key-save-delay 10000000
+ r bgrewriteaof
+
+ # disable AOF and wait for the child to be killed
+ r config set appendonly no
+ wait_for_condition 50 100 {
+ [string match {*Killing*AOF*child*} [exec tail -5 < [srv 0 stdout]]]
+ } else {
+ fail "Can't find 'Killing AOF child' into recent logs"
+ }
+ r config set rdb-key-save-delay 0
+ }
+
+ foreach d {string int} {
+ foreach e {listpack quicklist} {
+ test "AOF rewrite of list with $e encoding, $d data" {
+ r flushall
+ if {$e eq {listpack}} {
+ r config set list-max-listpack-size -2
+ set len 10
+ } else {
+ r config set list-max-listpack-size 10
+ set len 1000
+ }
+ for {set j 0} {$j < $len} {incr j} {
+ if {$d eq {string}} {
+ set data [randstring 0 16 alpha]
+ } else {
+ set data [randomInt 4000000000]
+ }
+ r lpush key $data
+ }
+ assert_equal [r object encoding key] $e
+ set d1 [debug_digest]
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ set d2 [debug_digest]
+ if {$d1 ne $d2} {
+ error "assertion:$d1 is not equal to $d2"
+ }
+ }
+ }
+ }
+
+ foreach d {string int} {
+ foreach e {intset hashtable} {
+ test "AOF rewrite of set with $e encoding, $d data" {
+ r flushall
+ if {$e eq {intset}} {set len 10} else {set len 1000}
+ for {set j 0} {$j < $len} {incr j} {
+ if {$d eq {string}} {
+ set data [randstring 0 16 alpha]
+ } else {
+ set data [randomInt 4000000000]
+ }
+ r sadd key $data
+ }
+ if {$d ne {string}} {
+ assert_equal [r object encoding key] $e
+ }
+ set d1 [debug_digest]
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ set d2 [debug_digest]
+ if {$d1 ne $d2} {
+ error "assertion:$d1 is not equal to $d2"
+ }
+ }
+ }
+ }
+
+ foreach d {string int} {
+ foreach e {listpack hashtable} {
+ test "AOF rewrite of hash with $e encoding, $d data" {
+ r flushall
+ if {$e eq {listpack}} {set len 10} else {set len 1000}
+ for {set j 0} {$j < $len} {incr j} {
+ if {$d eq {string}} {
+ set data [randstring 0 16 alpha]
+ } else {
+ set data [randomInt 4000000000]
+ }
+ r hset key $data $data
+ }
+ assert_equal [r object encoding key] $e
+ set d1 [debug_digest]
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ set d2 [debug_digest]
+ if {$d1 ne $d2} {
+ error "assertion:$d1 is not equal to $d2"
+ }
+ }
+ }
+ }
+
+ foreach d {string int} {
+ foreach e {listpack skiplist} {
+ test "AOF rewrite of zset with $e encoding, $d data" {
+ r flushall
+ if {$e eq {listpack}} {set len 10} else {set len 1000}
+ for {set j 0} {$j < $len} {incr j} {
+ if {$d eq {string}} {
+ set data [randstring 0 16 alpha]
+ } else {
+ set data [randomInt 4000000000]
+ }
+ r zadd key [expr rand()] $data
+ }
+ assert_equal [r object encoding key] $e
+ set d1 [debug_digest]
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ set d2 [debug_digest]
+ if {$d1 ne $d2} {
+ error "assertion:$d1 is not equal to $d2"
+ }
+ }
+ }
+ }
+
+ test "AOF rewrite functions" {
+ r flushall
+ r FUNCTION LOAD {#!lua name=test
+ redis.register_function('test', function() return 1 end)
+ }
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r function flush
+ r debug loadaof
+ assert_equal [r fcall test 0] 1
+ r FUNCTION LIST
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+
+ test {BGREWRITEAOF is delayed if BGSAVE is in progress} {
+ r flushall
+ r set k v
+ r config set rdb-key-save-delay 10000000
+ r bgsave
+ assert_match {*scheduled*} [r bgrewriteaof]
+ assert_equal [s aof_rewrite_scheduled] 1
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+ while {[s aof_rewrite_scheduled] eq 1} {
+ after 100
+ }
+ }
+
+ test {BGREWRITEAOF is refused if already in progress} {
+ r config set aof-use-rdb-preamble yes
+ r config set rdb-key-save-delay 10000000
+ catch {
+ r bgrewriteaof
+ r bgrewriteaof
+ } e
+ assert_match {*ERR*already*} $e
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+ }
+}
diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl
new file mode 100644
index 0000000..9532e0b
--- /dev/null
+++ b/tests/unit/auth.tcl
@@ -0,0 +1,89 @@
+start_server {tags {"auth external:skip"}} {
+ test {AUTH fails if there is no password configured server side} {
+ catch {r auth foo} err
+ set _ $err
+ } {ERR *any password*}
+
+ test {Arity check for auth command} {
+ catch {r auth a b c} err
+ set _ $err
+ } {*syntax error*}
+}
+
+start_server {tags {"auth external:skip"} overrides {requirepass foobar}} {
+ test {AUTH fails when a wrong password is given} {
+ catch {r auth wrong!} err
+ set _ $err
+ } {WRONGPASS*}
+
+ test {Arbitrary command gives an error when AUTH is required} {
+ catch {r set foo bar} err
+ set _ $err
+ } {NOAUTH*}
+
+ test {AUTH succeeds when the right password is given} {
+ r auth foobar
+ } {OK}
+
+ test {Once AUTH succeeded we can actually send commands to the server} {
+ r set foo 100
+ r incr foo
+ } {101}
+
+ test {For unauthenticated clients multibulk and bulk length are limited} {
+ set rr [redis [srv "host"] [srv "port"] 0 $::tls]
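+        # "*100\r\n" declares a 100-element multibulk request; before AUTH the
+        # server accepts only small multibulk and bulk lengths and rejects
+        # anything larger with the errors asserted below.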
+ $rr write "*100\r\n"
+ $rr flush
+ catch {[$rr read]} e
+ assert_match {*unauthenticated multibulk length*} $e
+ $rr close
+
+ set rr [redis [srv "host"] [srv "port"] 0 $::tls]
+ $rr write "*1\r\n\$100000000\r\n"
+ $rr flush
+ catch {[$rr read]} e
+ assert_match {*unauthenticated bulk length*} $e
+ $rr close
+ }
+}
+
+start_server {tags {"auth_binary_password external:skip"}} {
+ test {AUTH fails when binary password is wrong} {
+ r config set requirepass "abc\x00def"
+ catch {r auth abc} err
+ set _ $err
+ } {WRONGPASS*}
+
+ test {AUTH succeeds when binary password is correct} {
+ r config set requirepass "abc\x00def"
+ r auth "abc\x00def"
+ } {OK}
+
+ start_server {tags {"masterauth"}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ test {MASTERAUTH test with binary password} {
+ $master config set requirepass "abc\x00def"
+
+ # Configure the replica with masterauth
+ set loglines [count_log_lines 0]
+ $slave config set masterauth "abc"
+ $slave slaveof $master_host $master_port
+
+ # Verify replica is not able to sync with master
+ wait_for_log_messages 0 {"*Unable to AUTH to MASTER*"} $loglines 1000 10
+ assert_equal {down} [s 0 master_link_status]
+
+ # Test replica with the correct masterauth
+ $slave config set masterauth "abc\x00def"
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+ }
+}
diff --git a/tests/unit/bitfield.tcl b/tests/unit/bitfield.tcl
new file mode 100644
index 0000000..21091aa
--- /dev/null
+++ b/tests/unit/bitfield.tcl
@@ -0,0 +1,263 @@
+start_server {tags {"bitops"}} {
+ test {BITFIELD signed SET and GET basics} {
+ r del bits
+ set results {}
+ lappend results [r bitfield bits set i8 0 -100]
+ lappend results [r bitfield bits set i8 0 101]
+ lappend results [r bitfield bits get i8 0]
+ set results
+ } {0 -100 101}
+
+ test {BITFIELD unsigned SET and GET basics} {
+ r del bits
+ set results {}
+ lappend results [r bitfield bits set u8 0 255]
+ lappend results [r bitfield bits set u8 0 100]
+ lappend results [r bitfield bits get u8 0]
+ set results
+ } {0 255 100}
+
+ test {BITFIELD signed SET and GET together} {
+ r del bits
+ set results [r bitfield bits set i8 0 255 set i8 0 100 get i8 0]
+ } {0 -1 100}
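+
+    # Note: 255 does not fit in i8; under the default WRAP overflow policy it
+    # is stored as the bit pattern 0xff and reads back as -1, hence the -1
+    # reply from the second SET above.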
+
+ test {BITFIELD unsigned with SET, GET and INCRBY arguments} {
+ r del bits
+ set results [r bitfield bits set u8 0 255 incrby u8 0 100 get u8 0]
+ } {0 99 99}
+
+ test {BITFIELD with only key as argument} {
+ r del bits
+ set result [r bitfield bits]
+ assert {$result eq {}}
+ }
+
+ test {BITFIELD #<idx> form} {
+ r del bits
+ set results {}
+ r bitfield bits set u8 #0 65
+ r bitfield bits set u8 #1 66
+ r bitfield bits set u8 #2 67
+ r get bits
+ } {ABC}
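+
+    # The #<idx> form multiplies the index by the type width: with u8, #0, #1
+    # and #2 address bit offsets 0, 8 and 16, so the bytes spell "ABC" (65 66 67).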
+
+ test {BITFIELD basic INCRBY form} {
+ r del bits
+ set results {}
+ r bitfield bits set u8 #0 10
+ lappend results [r bitfield bits incrby u8 #0 100]
+ lappend results [r bitfield bits incrby u8 #0 100]
+ set results
+ } {110 210}
+
+ test {BITFIELD chaining of multiple commands} {
+ r del bits
+ set results {}
+ r bitfield bits set u8 #0 10
+ lappend results [r bitfield bits incrby u8 #0 100 incrby u8 #0 100]
+ set results
+ } {{110 210}}
+
+ test {BITFIELD unsigned overflow wrap} {
+ r del bits
+ set results {}
+ r bitfield bits set u8 #0 100
+ lappend results [r bitfield bits overflow wrap incrby u8 #0 257]
+ lappend results [r bitfield bits get u8 #0]
+ lappend results [r bitfield bits overflow wrap incrby u8 #0 255]
+ lappend results [r bitfield bits get u8 #0]
+ } {101 101 100 100}
+
+ test {BITFIELD unsigned overflow sat} {
+ r del bits
+ set results {}
+ r bitfield bits set u8 #0 100
+ lappend results [r bitfield bits overflow sat incrby u8 #0 257]
+ lappend results [r bitfield bits get u8 #0]
+ lappend results [r bitfield bits overflow sat incrby u8 #0 -255]
+ lappend results [r bitfield bits get u8 #0]
+ } {255 255 0 0}
+
+ test {BITFIELD signed overflow wrap} {
+ r del bits
+ set results {}
+ r bitfield bits set i8 #0 100
+ lappend results [r bitfield bits overflow wrap incrby i8 #0 257]
+ lappend results [r bitfield bits get i8 #0]
+ lappend results [r bitfield bits overflow wrap incrby i8 #0 255]
+ lappend results [r bitfield bits get i8 #0]
+ } {101 101 100 100}
+
+ test {BITFIELD signed overflow sat} {
+ r del bits
+ set results {}
+ r bitfield bits set u8 #0 100
+ lappend results [r bitfield bits overflow sat incrby i8 #0 257]
+ lappend results [r bitfield bits get i8 #0]
+ lappend results [r bitfield bits overflow sat incrby i8 #0 -255]
+ lappend results [r bitfield bits get i8 #0]
+ } {127 127 -128 -128}
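+
+    # With SAT, i8 results clamp to the [-128, 127] bounds instead of wrapping:
+    # 100+257 saturates at 127, and 127-255 saturates at -128.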
+
+ test {BITFIELD overflow detection fuzzing} {
+ for {set j 0} {$j < 1000} {incr j} {
+ set bits [expr {[randomInt 64]+1}]
+ set sign [randomInt 2]
+ set range [expr {2**$bits}]
+ if {$bits == 64} {set sign 1} ; # u64 is not supported by BITFIELD.
+ if {$sign} {
+ set min [expr {-($range/2)}]
+ set type "i$bits"
+ } else {
+ set min 0
+ set type "u$bits"
+ }
+ set max [expr {$min+$range-1}]
+
+ # Compare Tcl vs Redis
+ set range2 [expr {$range*2}]
+ set value [expr {($min*2)+[randomInt $range2]}]
+ set increment [expr {($min*2)+[randomInt $range2]}]
+ if {$value > 9223372036854775807} {
+ set value 9223372036854775807
+ }
+ if {$value < -9223372036854775808} {
+ set value -9223372036854775808
+ }
+ if {$increment > 9223372036854775807} {
+ set increment 9223372036854775807
+ }
+ if {$increment < -9223372036854775808} {
+ set increment -9223372036854775808
+ }
+
+ set overflow 0
+ if {$value > $max || $value < $min} {set overflow 1}
+ if {($value + $increment) > $max} {set overflow 1}
+ if {($value + $increment) < $min} {set overflow 1}
+
+ r del bits
+ set res1 [r bitfield bits overflow fail set $type 0 $value]
+ set res2 [r bitfield bits overflow fail incrby $type 0 $increment]
+
+ if {$overflow && [lindex $res1 0] ne {} &&
+ [lindex $res2 0] ne {}} {
+ fail "OW not detected where needed: $type $value+$increment"
+ }
+ if {!$overflow && ([lindex $res1 0] eq {} ||
+ [lindex $res2 0] eq {})} {
+ fail "OW detected where NOT needed: $type $value+$increment"
+ }
+ }
+ }
+
+ test {BITFIELD overflow wrap fuzzing} {
+ for {set j 0} {$j < 1000} {incr j} {
+ set bits [expr {[randomInt 64]+1}]
+ set sign [randomInt 2]
+ set range [expr {2**$bits}]
+ if {$bits == 64} {set sign 1} ; # u64 is not supported by BITFIELD.
+ if {$sign} {
+ set min [expr {-($range/2)}]
+ set type "i$bits"
+ } else {
+ set min 0
+ set type "u$bits"
+ }
+ set max [expr {$min+$range-1}]
+
+ # Compare Tcl vs Redis
+ set range2 [expr {$range*2}]
+ set value [expr {($min*2)+[randomInt $range2]}]
+ set increment [expr {($min*2)+[randomInt $range2]}]
+ if {$value > 9223372036854775807} {
+ set value 9223372036854775807
+ }
+ if {$value < -9223372036854775808} {
+ set value -9223372036854775808
+ }
+ if {$increment > 9223372036854775807} {
+ set increment 9223372036854775807
+ }
+ if {$increment < -9223372036854775808} {
+ set increment -9223372036854775808
+ }
+
+ r del bits
+ r bitfield bits overflow wrap set $type 0 $value
+ r bitfield bits overflow wrap incrby $type 0 $increment
+ set res [lindex [r bitfield bits get $type 0] 0]
+
+ set expected 0
+ if {$sign} {incr expected [expr {$max+1}]}
+ incr expected $value
+ incr expected $increment
+ set expected [expr {$expected % $range}]
+ if {$sign} {incr expected $min}
+
+ if {$res != $expected} {
+ fail "WRAP error: $type $value+$increment = $res, should be $expected"
+ }
+ }
+ }
+
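+    # Worked example of the wrap computation above (illustrative, u8 case):
+    # value=100, increment=257, range=256, so the expected value is
+    # (100 + 257) % 256 = 101, matching the deterministic wrap test earlier.
+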
+ test {BITFIELD regression for #3221} {
+ r set bits 1
+ r bitfield bits get u1 0
+ } {0}
+
+ test {BITFIELD regression for #3564} {
+ for {set j 0} {$j < 10} {incr j} {
+ r del mystring
+ set res [r BITFIELD mystring SET i8 0 10 SET i8 64 10 INCRBY i8 10 99900]
+ assert {$res eq {0 0 60}}
+ }
+ r del mystring
+ }
+
+ test {BITFIELD_RO with only key as argument} {
+ set res [r bitfield_ro bits]
+ assert {$res eq {}}
+ }
+
+ test {BITFIELD_RO fails when write option is used} {
+ catch {r bitfield_ro bits set u8 0 100 get u8 0} err
+ assert_match {*ERR BITFIELD_RO only supports the GET subcommand*} $err
+ }
+}
+
+start_server {tags {"repl external:skip"}} {
+ start_server {} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ test {BITFIELD: setup slave} {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ test {BITFIELD: write on master, read on slave} {
+ $master del bits
+ assert_equal 0 [$master bitfield bits set u8 0 255]
+ assert_equal 255 [$master bitfield bits set u8 0 100]
+ wait_for_ofs_sync $master $slave
+ assert_equal 100 [$slave bitfield_ro bits get u8 0]
+ }
+
+ test {BITFIELD_RO with only key as argument on read-only replica} {
+ set res [$slave bitfield_ro bits]
+ assert {$res eq {}}
+ }
+
+ test {BITFIELD_RO fails when write option is used on read-only replica} {
+ catch {$slave bitfield_ro bits set u8 0 100 get u8 0} err
+ assert_match {*ERR BITFIELD_RO only supports the GET subcommand*} $err
+ }
+ }
+}
diff --git a/tests/unit/bitops.tcl b/tests/unit/bitops.tcl
new file mode 100644
index 0000000..1b7db40
--- /dev/null
+++ b/tests/unit/bitops.tcl
@@ -0,0 +1,593 @@
+# Compare Redis commands against Tcl implementations of the same commands.
+proc count_bits s {
+ binary scan $s b* bits
+ string length [regsub -all {0} $bits {}]
+}
+
+# start end are bit index
+proc count_bits_start_end {s start end} {
+ binary scan $s B* bits
+ string length [regsub -all {0} [string range $bits $start $end] {}]
+}
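+
+# Note the different scan formats: count_bits uses "b*" because bit order is
+# irrelevant for a plain popcount, while count_bits_start_end needs "B*"
+# (MSB first) so that string indexes line up with Redis bit offsets, where
+# bit 0 is the most significant bit of the first byte.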
+
+proc simulate_bit_op {op args} {
+ set maxlen 0
+ set j 0
+ set count [llength $args]
+ foreach a $args {
+ binary scan $a b* bits
+ set b($j) $bits
+ if {[string length $bits] > $maxlen} {
+ set maxlen [string length $bits]
+ }
+ incr j
+ }
+ for {set j 0} {$j < $count} {incr j} {
+ if {[string length $b($j)] < $maxlen} {
+ append b($j) [string repeat 0 [expr $maxlen-[string length $b($j)]]]
+ }
+ }
+ set out {}
+ for {set x 0} {$x < $maxlen} {incr x} {
+ set bit [string range $b(0) $x $x]
+ if {$op eq {not}} {set bit [expr {!$bit}]}
+ for {set j 1} {$j < $count} {incr j} {
+ set bit2 [string range $b($j) $x $x]
+ switch $op {
+ and {set bit [expr {$bit & $bit2}]}
+ or {set bit [expr {$bit | $bit2}]}
+ xor {set bit [expr {$bit ^ $bit2}]}
+ }
+ }
+ append out $bit
+ }
+ binary format b* $out
+}
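+
+# For illustration: shorter inputs are zero-padded before the operation, so
+# [simulate_bit_op and "\xff\x0f" "\x0f"] returns "\x0f\x00".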
+
+start_server {tags {"bitops"}} {
+ test {BITCOUNT returns 0 against non existing key} {
+ assert {[r bitcount no-key] == 0}
+ assert {[r bitcount no-key 0 1000 bit] == 0}
+ }
+
+ test {BITCOUNT returns 0 with out of range indexes} {
+ r set str "xxxx"
+ assert {[r bitcount str 4 10] == 0}
+ assert {[r bitcount str 32 87 bit] == 0}
+ }
+
+ test {BITCOUNT returns 0 with negative indexes where start > end} {
+ r set str "xxxx"
+ assert {[r bitcount str -6 -7] == 0}
+ assert {[r bitcount str -6 -15 bit] == 0}
+ }
+
+ catch {unset num}
+ foreach vec [list "" "\xaa" "\x00\x00\xff" "foobar" "123"] {
+ incr num
+ test "BITCOUNT against test vector #$num" {
+ r set str $vec
+ set count [count_bits $vec]
+ assert {[r bitcount str] == $count}
+ assert {[r bitcount str 0 -1 bit] == $count}
+ }
+ }
+
+ test {BITCOUNT fuzzing without start/end} {
+ for {set j 0} {$j < 100} {incr j} {
+ set str [randstring 0 3000]
+ r set str $str
+ set count [count_bits $str]
+ assert {[r bitcount str] == $count}
+ assert {[r bitcount str 0 -1 bit] == $count}
+ }
+ }
+
+ test {BITCOUNT fuzzing with start/end} {
+ for {set j 0} {$j < 100} {incr j} {
+ set str [randstring 0 3000]
+ r set str $str
+ set l [string length $str]
+ set start [randomInt $l]
+ set end [randomInt $l]
+ if {$start > $end} {
+ # Swap start and end
+ lassign [list $end $start] start end
+ }
+ assert {[r bitcount str $start $end] == [count_bits [string range $str $start $end]]}
+ }
+
+ for {set j 0} {$j < 100} {incr j} {
+ set str [randstring 0 3000]
+ r set str $str
+ set l [expr [string length $str] * 8]
+ set start [randomInt $l]
+ set end [randomInt $l]
+ if {$start > $end} {
+ # Swap start and end
+ lassign [list $end $start] start end
+ }
+ assert {[r bitcount str $start $end bit] == [count_bits_start_end $str $start $end]}
+ }
+ }
+
+ test {BITCOUNT with start, end} {
+ set s "foobar"
+ r set s $s
+ assert_equal [r bitcount s 0 -1] [count_bits "foobar"]
+ assert_equal [r bitcount s 1 -2] [count_bits "ooba"]
+ assert_equal [r bitcount s -2 1] [count_bits ""]
+ assert_equal [r bitcount s 0 1000] [count_bits "foobar"]
+
+ assert_equal [r bitcount s 0 -1 bit] [count_bits $s]
+ assert_equal [r bitcount s 10 14 bit] [count_bits_start_end $s 10 14]
+ assert_equal [r bitcount s 3 14 bit] [count_bits_start_end $s 3 14]
+ assert_equal [r bitcount s 3 29 bit] [count_bits_start_end $s 3 29]
+ assert_equal [r bitcount s 10 -34 bit] [count_bits_start_end $s 10 14]
+ assert_equal [r bitcount s 3 -34 bit] [count_bits_start_end $s 3 14]
+ assert_equal [r bitcount s 3 -19 bit] [count_bits_start_end $s 3 29]
+ assert_equal [r bitcount s -2 1 bit] 0
+ assert_equal [r bitcount s 0 1000 bit] [count_bits $s]
+ }
+
+ test {BITCOUNT syntax error #1} {
+ catch {r bitcount s 0} e
+ set e
+ } {ERR *syntax*}
+
+ test {BITCOUNT syntax error #2} {
+ catch {r bitcount s 0 1 hello} e
+ set e
+ } {ERR *syntax*}
+
+ test {BITCOUNT regression test for github issue #582} {
+ r del foo
+ r setbit foo 0 1
+ if {[catch {r bitcount foo 0 4294967296} e]} {
+ assert_match {*ERR*out of range*} $e
+ set _ 1
+ } else {
+ set e
+ }
+ } {1}
+
+ test {BITCOUNT misaligned prefix} {
+ r del str
+ r set str ab
+ r bitcount str 1 -1
+ } {3}
+
+ test {BITCOUNT misaligned prefix + full words + remainder} {
+ r del str
+ r set str __PPxxxxxxxxxxxxxxxxRR__
+ r bitcount str 2 -3
+ } {74}
+
+ test {BITOP NOT (empty string)} {
+ r set s{t} ""
+ r bitop not dest{t} s{t}
+ r get dest{t}
+ } {}
+
+ test {BITOP NOT (known string)} {
+ r set s{t} "\xaa\x00\xff\x55"
+ r bitop not dest{t} s{t}
+ r get dest{t}
+ } "\x55\xff\x00\xaa"
+
+ test {BITOP where dest and target are the same key} {
+ r set s "\xaa\x00\xff\x55"
+ r bitop not s s
+ r get s
+ } "\x55\xff\x00\xaa"
+
+ test {BITOP AND|OR|XOR don't change the string with single input key} {
+ r set a{t} "\x01\x02\xff"
+ r bitop and res1{t} a{t}
+ r bitop or res2{t} a{t}
+ r bitop xor res3{t} a{t}
+ list [r get res1{t}] [r get res2{t}] [r get res3{t}]
+ } [list "\x01\x02\xff" "\x01\x02\xff" "\x01\x02\xff"]
+
+    test {BITOP missing key is considered a stream of zeroes} {
+ r set a{t} "\x01\x02\xff"
+        r bitop and res1{t} no-such-key{t} a{t}
+        r bitop or res2{t} no-such-key{t} a{t} no-such-key{t}
+        r bitop xor res3{t} no-such-key{t} a{t}
+ list [r get res1{t}] [r get res2{t}] [r get res3{t}]
+ } [list "\x00\x00\x00" "\x01\x02\xff" "\x01\x02\xff"]
+
+ test {BITOP shorter keys are zero-padded to the key with max length} {
+ r set a{t} "\x01\x02\xff\xff"
+ r set b{t} "\x01\x02\xff"
+ r bitop and res1{t} a{t} b{t}
+ r bitop or res2{t} a{t} b{t}
+ r bitop xor res3{t} a{t} b{t}
+ list [r get res1{t}] [r get res2{t}] [r get res3{t}]
+ } [list "\x01\x02\xff\x00" "\x01\x02\xff\xff" "\x00\x00\x00\xff"]
+
+ foreach op {and or xor} {
+ test "BITOP $op fuzzing" {
+ for {set i 0} {$i < 10} {incr i} {
+ r flushall
+ set vec {}
+ set veckeys {}
+ set numvec [expr {[randomInt 10]+1}]
+ for {set j 0} {$j < $numvec} {incr j} {
+ set str [randstring 0 1000]
+ lappend vec $str
+ lappend veckeys vector_$j{t}
+ r set vector_$j{t} $str
+ }
+ r bitop $op target{t} {*}$veckeys
+ assert_equal [r get target{t}] [simulate_bit_op $op {*}$vec]
+ }
+ }
+ }
+
+ test {BITOP NOT fuzzing} {
+ for {set i 0} {$i < 10} {incr i} {
+ r flushall
+ set str [randstring 0 1000]
+ r set str{t} $str
+ r bitop not target{t} str{t}
+ assert_equal [r get target{t}] [simulate_bit_op not $str]
+ }
+ }
+
+ test {BITOP with integer encoded source objects} {
+ r set a{t} 1
+ r set b{t} 2
+ r bitop xor dest{t} a{t} b{t} a{t}
+ r get dest{t}
+ } {2}
+
+ test {BITOP with non string source key} {
+ r del c{t}
+ r set a{t} 1
+ r set b{t} 2
+ r lpush c{t} foo
+ catch {r bitop xor dest{t} a{t} b{t} c{t} d{t}} e
+ set e
+ } {WRONGTYPE*}
+
+ test {BITOP with empty string after non empty string (issue #529)} {
+ r flushdb
+ r set a{t} "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
+ r bitop or x{t} a{t} b{t}
+ } {32}
+
+ test {BITPOS bit=0 with empty key returns 0} {
+ r del str
+ assert {[r bitpos str 0] == 0}
+ assert {[r bitpos str 0 0 -1 bit] == 0}
+ }
+
+ test {BITPOS bit=1 with empty key returns -1} {
+ r del str
+ assert {[r bitpos str 1] == -1}
+ assert {[r bitpos str 1 0 -1] == -1}
+ }
+
+ test {BITPOS bit=0 with string less than 1 word works} {
+ r set str "\xff\xf0\x00"
+ assert {[r bitpos str 0] == 12}
+ assert {[r bitpos str 0 0 -1 bit] == 12}
+ }
+
+ test {BITPOS bit=1 with string less than 1 word works} {
+ r set str "\x00\x0f\x00"
+ assert {[r bitpos str 1] == 12}
+ assert {[r bitpos str 1 0 -1 bit] == 12}
+ }
+
+ test {BITPOS bit=0 starting at unaligned address} {
+ r set str "\xff\xf0\x00"
+ assert {[r bitpos str 0 1] == 12}
+ assert {[r bitpos str 0 1 -1 bit] == 12}
+ }
+
+ test {BITPOS bit=1 starting at unaligned address} {
+ r set str "\x00\x0f\xff"
+ assert {[r bitpos str 1 1] == 12}
+ assert {[r bitpos str 1 1 -1 bit] == 12}
+ }
+
+    test {BITPOS bit=0 unaligned+full word+remainder} {
+ r del str
+ r set str "\xff\xff\xff" ; # Prefix
+        # Followed by three (or six on 32-bit systems) full words
+ r append str "\xff\xff\xff\xff\xff\xff\xff\xff"
+ r append str "\xff\xff\xff\xff\xff\xff\xff\xff"
+ r append str "\xff\xff\xff\xff\xff\xff\xff\xff"
+ # First zero bit.
+ r append str "\x0f"
+ assert {[r bitpos str 0] == 216}
+ assert {[r bitpos str 0 1] == 216}
+ assert {[r bitpos str 0 2] == 216}
+ assert {[r bitpos str 0 3] == 216}
+ assert {[r bitpos str 0 4] == 216}
+ assert {[r bitpos str 0 5] == 216}
+ assert {[r bitpos str 0 6] == 216}
+ assert {[r bitpos str 0 7] == 216}
+ assert {[r bitpos str 0 8] == 216}
+
+ assert {[r bitpos str 0 1 -1 bit] == 216}
+ assert {[r bitpos str 0 9 -1 bit] == 216}
+ assert {[r bitpos str 0 17 -1 bit] == 216}
+ assert {[r bitpos str 0 25 -1 bit] == 216}
+ assert {[r bitpos str 0 33 -1 bit] == 216}
+ assert {[r bitpos str 0 41 -1 bit] == 216}
+ assert {[r bitpos str 0 49 -1 bit] == 216}
+ assert {[r bitpos str 0 57 -1 bit] == 216}
+ assert {[r bitpos str 0 65 -1 bit] == 216}
+ }
+
+    test {BITPOS bit=1 unaligned+full word+remainder} {
+ r del str
+ r set str "\x00\x00\x00" ; # Prefix
+        # Followed by three (or six on 32-bit systems) full words
+ r append str "\x00\x00\x00\x00\x00\x00\x00\x00"
+ r append str "\x00\x00\x00\x00\x00\x00\x00\x00"
+ r append str "\x00\x00\x00\x00\x00\x00\x00\x00"
+        # First one bit.
+ r append str "\xf0"
+ assert {[r bitpos str 1] == 216}
+ assert {[r bitpos str 1 1] == 216}
+ assert {[r bitpos str 1 2] == 216}
+ assert {[r bitpos str 1 3] == 216}
+ assert {[r bitpos str 1 4] == 216}
+ assert {[r bitpos str 1 5] == 216}
+ assert {[r bitpos str 1 6] == 216}
+ assert {[r bitpos str 1 7] == 216}
+ assert {[r bitpos str 1 8] == 216}
+
+ assert {[r bitpos str 1 1 -1 bit] == 216}
+ assert {[r bitpos str 1 9 -1 bit] == 216}
+ assert {[r bitpos str 1 17 -1 bit] == 216}
+ assert {[r bitpos str 1 25 -1 bit] == 216}
+ assert {[r bitpos str 1 33 -1 bit] == 216}
+ assert {[r bitpos str 1 41 -1 bit] == 216}
+ assert {[r bitpos str 1 49 -1 bit] == 216}
+ assert {[r bitpos str 1 57 -1 bit] == 216}
+ assert {[r bitpos str 1 65 -1 bit] == 216}
+ }
+
+ test {BITPOS bit=1 returns -1 if string is all 0 bits} {
+ r set str ""
+ for {set j 0} {$j < 20} {incr j} {
+ assert {[r bitpos str 1] == -1}
+ assert {[r bitpos str 1 0 -1 bit] == -1}
+ r append str "\x00"
+ }
+ }
+
+ test {BITPOS bit=0 works with intervals} {
+ r set str "\x00\xff\x00"
+ assert {[r bitpos str 0 0 -1] == 0}
+ assert {[r bitpos str 0 1 -1] == 16}
+ assert {[r bitpos str 0 2 -1] == 16}
+ assert {[r bitpos str 0 2 200] == 16}
+ assert {[r bitpos str 0 1 1] == -1}
+
+ assert {[r bitpos str 0 0 -1 bit] == 0}
+ assert {[r bitpos str 0 8 -1 bit] == 16}
+ assert {[r bitpos str 0 16 -1 bit] == 16}
+ assert {[r bitpos str 0 16 200 bit] == 16}
+ assert {[r bitpos str 0 8 8 bit] == -1}
+ }
+
+ test {BITPOS bit=1 works with intervals} {
+ r set str "\x00\xff\x00"
+ assert {[r bitpos str 1 0 -1] == 8}
+ assert {[r bitpos str 1 1 -1] == 8}
+ assert {[r bitpos str 1 2 -1] == -1}
+ assert {[r bitpos str 1 2 200] == -1}
+ assert {[r bitpos str 1 1 1] == 8}
+
+ assert {[r bitpos str 1 0 -1 bit] == 8}
+ assert {[r bitpos str 1 8 -1 bit] == 8}
+ assert {[r bitpos str 1 16 -1 bit] == -1}
+ assert {[r bitpos str 1 16 200 bit] == -1}
+ assert {[r bitpos str 1 8 8 bit] == 8}
+ }
+
+ test {BITPOS bit=0 changes behavior if end is given} {
+ r set str "\xff\xff\xff"
+ assert {[r bitpos str 0] == 24}
+ assert {[r bitpos str 0 0] == 24}
+ assert {[r bitpos str 0 0 -1] == -1}
+ assert {[r bitpos str 0 0 -1 bit] == -1}
+ }
+
+ test {SETBIT/BITFIELD only increase dirty when the value changed} {
+ r del foo{t} foo2{t} foo3{t}
+ set dirty [s rdb_changes_since_last_save]
+
+        # Creating a new key always increases the dirty counter.
+ r setbit foo{t} 0 0
+ r bitfield foo2{t} set i5 0 0
+ set dirty2 [s rdb_changes_since_last_save]
+ assert {$dirty2 == $dirty + 2}
+
+ # No change.
+ r setbit foo{t} 0 0
+ r bitfield foo2{t} set i5 0 0
+ set dirty3 [s rdb_changes_since_last_save]
+ assert {$dirty3 == $dirty2}
+
+ # Do a change and a no change.
+ r setbit foo{t} 0 1
+ r setbit foo{t} 0 1
+ r setbit foo{t} 0 0
+ r setbit foo{t} 0 0
+ r bitfield foo2{t} set i5 0 1
+ r bitfield foo2{t} set i5 0 1
+ r bitfield foo2{t} set i5 0 0
+ r bitfield foo2{t} set i5 0 0
+ set dirty4 [s rdb_changes_since_last_save]
+ assert {$dirty4 == $dirty3 + 4}
+
+        # BITFIELD INCRBY always increases the dirty counter.
+ r bitfield foo3{t} incrby i5 0 1
+ r bitfield foo3{t} incrby i5 0 1
+ set dirty5 [s rdb_changes_since_last_save]
+ assert {$dirty5 == $dirty4 + 2}
+
+ # Change length only
+ r setbit foo{t} 90 0
+ r bitfield foo2{t} set i5 90 0
+ set dirty6 [s rdb_changes_since_last_save]
+ assert {$dirty6 == $dirty5 + 2}
+ }
+
+ test {BITPOS bit=1 fuzzy testing using SETBIT} {
+ r del str
+        set max 524288; # 512k bits = 64KB
+ set first_one_pos -1
+ for {set j 0} {$j < 1000} {incr j} {
+ assert {[r bitpos str 1] == $first_one_pos}
+ assert {[r bitpos str 1 0 -1 bit] == $first_one_pos}
+ set pos [randomInt $max]
+ r setbit str $pos 1
+ if {$first_one_pos == -1 || $first_one_pos > $pos} {
+ # Update the position of the first 1 bit in the array
+ # if the bit we set is on the left of the previous one.
+ set first_one_pos $pos
+ }
+ }
+ }
+
+ test {BITPOS bit=0 fuzzy testing using SETBIT} {
+        set max 524288; # 512k bits = 64KB
+ set first_zero_pos $max
+ r set str [string repeat "\xff" [expr $max/8]]
+ for {set j 0} {$j < 1000} {incr j} {
+ assert {[r bitpos str 0] == $first_zero_pos}
+ if {$first_zero_pos == $max} {
+ assert {[r bitpos str 0 0 -1 bit] == -1}
+ } else {
+ assert {[r bitpos str 0 0 -1 bit] == $first_zero_pos}
+ }
+ set pos [randomInt $max]
+ r setbit str $pos 0
+ if {$first_zero_pos > $pos} {
+ # Update the position of the first 0 bit in the array
+ # if the bit we clear is on the left of the previous one.
+ set first_zero_pos $pos
+ }
+ }
+ }
+
+    # This test creates a string of 10 bytes. It has two iterations: one clears
+    # all the bits and sets just one bit, the other sets all the bits and clears
+    # just one bit. Each iteration loops from bit offset 0 to 79 and uses SETBIT
+    # to set the bit to 0 or 1, then runs BITPOS and BITCOUNT on a few mutations.
+ test {BITPOS/BITCOUNT fuzzy testing using SETBIT} {
+ # We have two start and end ranges, each range used to select a random
+ # position, one for start position and one for end position.
+ proc test_one {start1 end1 start2 end2 pos bit pos_type} {
+ set start [randomRange $start1 $end1]
+ set end [randomRange $start2 $end2]
+ if {$start > $end} {
+ # Swap start and end
+ lassign [list $end $start] start end
+ }
+ set startbit $start
+ set endbit $end
+ # For byte index, we need to generate the real bit index
+ if {[string equal $pos_type byte]} {
+ set startbit [expr $start << 3]
+ set endbit [expr ($end << 3) + 7]
+ }
+            # Whether the test bit index falls inside the [start, end] range.
+ set inrange [expr ($pos >= $startbit && $pos <= $endbit) ? 1: 0]
+ # For bitcount, there are four different results.
+ # $inrange == 0 && $bit == 0, all bits in the range are set, so $endbit - $startbit + 1
+ # $inrange == 0 && $bit == 1, all bits in the range are clear, so 0
+ # $inrange == 1 && $bit == 0, all bits in the range are set but one, so $endbit - $startbit
+ # $inrange == 1 && $bit == 1, all bits in the range are clear but one, so 1
+ set res_count [expr ($endbit - $startbit + 1) * (1 - $bit) + $inrange * [expr $bit ? 1 : -1]]
+ assert {[r bitpos str $bit $start $end $pos_type] == [expr $inrange ? $pos : -1]}
+ assert {[r bitcount str $start $end $pos_type] == $res_count}
+ }
+
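+        # Worked example (illustrative): startbit=2, endbit=5, pos=3, bit=0
+        # gives inrange=1, so res_count = (5-2+1)*(1-0) + 1*(-1) = 3 and
+        # BITPOS must return 3.
+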
+ r del str
+ set max 80;
+ r setbit str [expr $max - 1] 0
+ set bytes [expr $max >> 3]
+    # The first iteration sets all bits to 1, then clears bits one at a time from 0 to max - 1.
+    # The second iteration sets all bits to 0, then sets bits one at a time from 0 to max - 1.
+ for {set bit 0} {$bit < 2} {incr bit} {
+ r bitop not str str
+ for {set j 0} {$j < $max} {incr j} {
+ r setbit str $j $bit
+
+ # First iteration tests byte index and second iteration tests bit index.
+ foreach {curr end pos_type} [list [expr $j >> 3] $bytes byte $j $max bit] {
+ # start==end set to bit position
+ test_one $curr $curr $curr $curr $j $bit $pos_type
+ # Both start and end are before bit position
+ if {$curr > 0} {
+ test_one 0 $curr 0 $curr $j $bit $pos_type
+ }
+ # Both start and end are after bit position
+ if {$curr < [expr $end - 1]} {
+ test_one [expr $curr + 1] $end [expr $curr + 1] $end $j $bit $pos_type
+ }
+ # start is before and end is after bit position
+ if {$curr > 0 && $curr < [expr $end - 1]} {
+ test_one 0 $curr [expr $curr +1] $end $j $bit $pos_type
+ }
+ }
+
+ # restore bit
+ r setbit str $j [expr 1 - $bit]
+ }
+ }
+ }
+}
+
+run_solo {bitops-large-memory} {
+start_server {tags {"bitops"}} {
+ test "BIT pos larger than UINT_MAX" {
+ set bytes [expr (1 << 29) + 1]
+ set bitpos [expr (1 << 32)]
+ set oldval [lindex [r config get proto-max-bulk-len] 1]
+ r config set proto-max-bulk-len $bytes
+ r setbit mykey $bitpos 1
+ assert_equal $bytes [r strlen mykey]
+ assert_equal 1 [r getbit mykey $bitpos]
+ assert_equal [list 128 128 -1] [r bitfield mykey get u8 $bitpos set u8 $bitpos 255 get i8 $bitpos]
+ assert_equal $bitpos [r bitpos mykey 1]
+ assert_equal $bitpos [r bitpos mykey 1 [expr $bytes - 1]]
+ if {$::accurate} {
+ # set all bits to 1
+ set mega [expr (1 << 23)]
+ set part [string repeat "\xFF" $mega]
+ for {set i 0} {$i < 64} {incr i} {
+ r setrange mykey [expr $i * $mega] $part
+ }
+ r setrange mykey [expr $bytes - 1] "\xFF"
+ assert_equal [expr $bitpos + 8] [r bitcount mykey]
+ assert_equal -1 [r bitpos mykey 0 0 [expr $bytes - 1]]
+ }
+ r config set proto-max-bulk-len $oldval
+ r del mykey
+ } {1} {large-memory}
+
+ test "SETBIT values larger than UINT32_MAX and lzf_compress/lzf_decompress correctly" {
+ set bytes [expr (1 << 32) + 1]
+ set bitpos [expr (1 << 35)]
+ set oldval [lindex [r config get proto-max-bulk-len] 1]
+ r config set proto-max-bulk-len $bytes
+ r setbit mykey $bitpos 1
+ assert_equal $bytes [r strlen mykey]
+ assert_equal 1 [r getbit mykey $bitpos]
+ r debug reload ;# lzf_compress/lzf_decompress when RDB saving/loading.
+ assert_equal 1 [r getbit mykey $bitpos]
+ r config set proto-max-bulk-len $oldval
+ r del mykey
+ } {1} {large-memory needs:debug}
+}
+} ;#run_solo
diff --git a/tests/unit/client-eviction.tcl b/tests/unit/client-eviction.tcl
new file mode 100644
index 0000000..1fc7c02
--- /dev/null
+++ b/tests/unit/client-eviction.tcl
@@ -0,0 +1,586 @@
+tags {"external:skip logreqres:skip"} {
+
+# Get info about a redis client connection:
+# name - name of client we want to query
+# f - field name from "CLIENT LIST" we want to get
+proc client_field {name f} {
+ set clients [split [string trim [r client list]] "\r\n"]
+ set c [lsearch -inline $clients *name=$name*]
+ if {![regexp $f=(\[a-zA-Z0-9-\]+) $c - res]} {
+ error "no client named $name found with field $f"
+ }
+ return $res
+}
+
+proc client_exists {name} {
+ if {[catch { client_field $name tot-mem } e]} {
+ return false
+ }
+ return true
+}
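+
+# Example (illustrative): [client_field foo qbuf] returns foo's query buffer
+# size as reported by CLIENT LIST, and raises an error when no such client
+# exists; client_exists simply catches that error.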
+
+proc gen_client {} {
+ set rr [redis_client]
+ set name "tst_[randstring 4 4 simplealpha]"
+ $rr client setname $name
+ assert {[client_exists $name]}
+ return [list $rr $name]
+}
+
+# Sum a value across all redis client connections:
+# f - the field name from "CLIENT LIST" we want to sum
+proc clients_sum {f} {
+ set sum 0
+ set clients [split [string trim [r client list]] "\r\n"]
+ foreach c $clients {
+ if {![regexp $f=(\[a-zA-Z0-9-\]+) $c - res]} {
+ error "field $f not found in $c"
+ }
+ incr sum $res
+ }
+ return $sum
+}
+
+proc mb {v} {
+ return [expr $v * 1024 * 1024]
+}
+
+proc kb {v} {
+ return [expr $v * 1024]
+}
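+
+# Convert megabytes/kilobytes to bytes, e.g. [mb 3] == 3145728 and [kb 128] == 131072.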
+
+start_server {} {
+ set maxmemory_clients 3000000
+ r config set maxmemory-clients $maxmemory_clients
+
+ test "client evicted due to large argv" {
+ r flushdb
+ lassign [gen_client] rr cname
+        # Attempt a large multi-bulk command below the eviction limit
+ $rr mset k v k2 [string repeat v 1000000]
+ assert_equal [$rr get k] v
+ # Attempt another command, now causing client eviction
+ catch { $rr mset k v k2 [string repeat v $maxmemory_clients] } e
+ assert {![client_exists $cname]}
+ $rr close
+ }
+
+ test "client evicted due to large query buf" {
+ r flushdb
+ lassign [gen_client] rr cname
+        # Attempt to fill the query buffer with a single argument larger than
+        # the limit, without ever completing it, causing client eviction
+ catch {
+ $rr write [join [list "*1\r\n\$$maxmemory_clients\r\n" [string repeat v $maxmemory_clients]] ""]
+ $rr flush
+ $rr read
+ } e
+ assert {![client_exists $cname]}
+ $rr close
+ }
+
+ test "client evicted due to percentage of maxmemory" {
+ set maxmemory [mb 6]
+ r config set maxmemory $maxmemory
+ # Set client eviction threshold to 7% of maxmemory
+ set maxmemory_clients_p 7
+ r config set maxmemory-clients $maxmemory_clients_p%
+ r flushdb
+
+ set maxmemory_clients_actual [expr $maxmemory * $maxmemory_clients_p / 100]
+
+ lassign [gen_client] rr cname
+        # Fill the query buffer to only half the percentage threshold and verify we're not disconnected
+ set n [expr $maxmemory_clients_actual / 2]
+ $rr write [join [list "*1\r\n\$$n\r\n" [string repeat v $n]] ""]
+ $rr flush
+ set tot_mem [client_field $cname tot-mem]
+ assert {$tot_mem >= $n && $tot_mem < $maxmemory_clients_actual}
+
+        # Fill the query buffer up to the percentage threshold of maxmemory and verify we're evicted
+ $rr close
+ lassign [gen_client] rr cname
+ catch {
+ $rr write [join [list "*1\r\n\$$maxmemory_clients_actual\r\n" [string repeat v $maxmemory_clients_actual]] ""]
+ $rr flush
+ } e
+ assert {![client_exists $cname]}
+ $rr close
+
+ # Restore settings
+ r config set maxmemory 0
+ r config set maxmemory-clients $maxmemory_clients
+ }
+
+ test "client evicted due to large multi buf" {
+ r flushdb
+ lassign [gen_client] rr cname
+
+ # Attempt a multi-exec where sum of commands is less than maxmemory_clients
+ $rr multi
+ $rr set k [string repeat v [expr $maxmemory_clients / 4]]
+ $rr set k [string repeat v [expr $maxmemory_clients / 4]]
+ assert_equal [$rr exec] {OK OK}
+
+ # Attempt a multi-exec where sum of commands is more than maxmemory_clients, causing client eviction
+ $rr multi
+ catch {
+ for {set j 0} {$j < 5} {incr j} {
+ $rr set k [string repeat v [expr $maxmemory_clients / 4]]
+ }
+ } e
+ assert {![client_exists $cname]}
+ $rr close
+ }
+
+ test "client evicted due to watched key list" {
+ r flushdb
+ set rr [redis_client]
+
+ # Since watched key list is a small overhead this test uses a minimal maxmemory-clients config
+ set temp_maxmemory_clients 200000
+ r config set maxmemory-clients $temp_maxmemory_clients
+
+ # Append watched keys until list maxes out maxmemory clients and causes client eviction
+ catch {
+ for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
+ $rr watch $j
+ }
+ } e
+ assert_match {I/O error reading reply} $e
+ $rr close
+
+ # Restore config for next tests
+ r config set maxmemory-clients $maxmemory_clients
+ }
+
+ test "client evicted due to pubsub subscriptions" {
+ r flushdb
+
+ # Since pubsub subscriptions cause a small overhead this test uses a minimal maxmemory-clients config
+ set temp_maxmemory_clients 200000
+ r config set maxmemory-clients $temp_maxmemory_clients
+
+ # Test eviction due to pubsub patterns
+ set rr [redis_client]
+ # Add patterns until list maxes out maxmemory clients and causes client eviction
+ catch {
+ for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
+ $rr psubscribe $j
+ }
+ } e
+ assert_match {I/O error reading reply} $e
+ $rr close
+
+ # Test eviction due to pubsub channels
+ set rr [redis_client]
+ # Subscribe to global channels until list maxes out maxmemory clients and causes client eviction
+ catch {
+ for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
+ $rr subscribe $j
+ }
+ } e
+ assert_match {I/O error reading reply} $e
+ $rr close
+
+ # Test eviction due to sharded pubsub channels
+ set rr [redis_client]
+ # Subscribe to sharded pubsub channels until list maxes out maxmemory clients and causes client eviction
+ catch {
+ for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
+ $rr ssubscribe $j
+ }
+ } e
+ assert_match {I/O error reading reply} $e
+ $rr close
+
+ # Restore config for next tests
+ r config set maxmemory-clients $maxmemory_clients
+ }
+
+ test "client evicted due to tracking redirection" {
+ r flushdb
+ set rr [redis_client]
+ set redirected_c [redis_client]
+ $redirected_c client setname redirected_client
+ set redir_id [$redirected_c client id]
+ $redirected_c SUBSCRIBE __redis__:invalidate
+ $rr client tracking on redirect $redir_id bcast
+ # Use a big key name to fill the redirected tracking client's buffer quickly
+ set key_length [expr 1024*200]
+ set long_key [string repeat k $key_length]
+ # Use a script so we won't need to pass the long key name when dirtying it in the loop
+ set script_sha [$rr script load "redis.call('incr', '$long_key')"]
+
+ # Pause serverCron so it won't update memory usage since we're testing the update logic when
+ # writing tracking redirection output
+ r debug pause-cron 1
+
+ # Read and write to same (long) key until redirected_client's buffers cause it to be evicted
+ catch {
+ while true {
+ set mem [client_field redirected_client tot-mem]
+ assert {$mem < $maxmemory_clients}
+ $rr evalsha $script_sha 0
+ }
+ } e
+ assert_match {no client named redirected_client found*} $e
+
+ r debug pause-cron 0
+ $rr close
+ $redirected_c close
+ } {0} {needs:debug}
+
+ test "client evicted due to client tracking prefixes" {
+ r flushdb
+ set rr [redis_client]
+
+ # Since tracking prefixes list is a small overhead this test uses a minimal maxmemory-clients config
+ set temp_maxmemory_clients 200000
+ r config set maxmemory-clients $temp_maxmemory_clients
+
+ # Append tracking prefixes until list maxes out maxmemory clients and causes client eviction
+        # Combine several prefixes in each command to speed up the test. Because the memory
+        # usage of the prefixes is not actually counted (see getClientMemoryUsage), we cannot
+        # use larger prefixes to speed up the test here.
+ catch {
+ for {set j 0} {$j < $temp_maxmemory_clients} {incr j} {
+ $rr client tracking on prefix [format a%09s $j] prefix [format b%09s $j] prefix [format c%09s $j] bcast
+ }
+ } e
+ assert_match {I/O error reading reply} $e
+ $rr close
+
+ # Restore config for next tests
+ r config set maxmemory-clients $maxmemory_clients
+ }
+
+ test "client evicted due to output buf" {
+ r flushdb
+ r setrange k 200000 v
+ set rr [redis_deferring_client]
+ $rr client setname test_client
+ $rr flush
+ assert {[$rr read] == "OK"}
+ # Attempt a large response under eviction limit
+ $rr get k
+ $rr flush
+ assert {[string length [$rr read]] == 200001}
+ set mem [client_field test_client tot-mem]
+ assert {$mem < $maxmemory_clients}
+
+        # Fill the output buffer in a loop without reading it and make sure
+        # we're eventually disconnected, but before tot-mem reaches maxmemory_clients
+ while true {
+ if { [catch {
+ set mem [client_field test_client tot-mem]
+ assert {$mem < $maxmemory_clients}
+ $rr get k
+ $rr flush
+ } e]} {
+ assert {![client_exists test_client]}
+ break
+ }
+ }
+ $rr close
+ }
+
+ foreach {no_evict} {on off} {
+ test "client no-evict $no_evict" {
+ r flushdb
+ r client setname control
+ r client no-evict on ;# Avoid evicting the main connection
+ lassign [gen_client] rr cname
+ $rr client no-evict $no_evict
+
+ # Overflow maxmemory-clients
+ set qbsize [expr {$maxmemory_clients + 1}]
+ if {[catch {
+ $rr write [join [list "*1\r\n\$$qbsize\r\n" [string repeat v $qbsize]] ""]
+ $rr flush
+ wait_for_condition 200 10 {
+ [client_field $cname qbuf] == $qbsize
+ } else {
+ fail "Failed to fill qbuf for test"
+ }
+ } e] && $no_evict == off} {
+ assert {![client_exists $cname]}
+ } elseif {$no_evict == on} {
+ assert {[client_field $cname tot-mem] > $maxmemory_clients}
+ }
+ $rr close
+ }
+ }
+}
+
+start_server {} {
+ set server_pid [s process_id]
+ set maxmemory_clients [mb 10]
+ set obuf_limit [mb 3]
+ r config set maxmemory-clients $maxmemory_clients
+ r config set client-output-buffer-limit "normal $obuf_limit 0 0"
+
+ test "avoid client eviction when client is freed by output buffer limit" {
+ r flushdb
+ set obuf_size [expr {$obuf_limit + [mb 1]}]
+ r setrange k $obuf_size v
+ set rr1 [redis_client]
+ $rr1 client setname "qbuf-client"
+ set rr2 [redis_deferring_client]
+ $rr2 client setname "obuf-client1"
+ assert_equal [$rr2 read] OK
+ set rr3 [redis_deferring_client]
+ $rr3 client setname "obuf-client2"
+ assert_equal [$rr3 read] OK
+
+        # Occupy the qbuf-client's query buffer so that, together with the obuf
+        # clients' output buffers, maxmemory-clients will be exceeded
+ set qbsize [expr {$maxmemory_clients - $obuf_size}]
+ $rr1 write [join [list "*1\r\n\$$qbsize\r\n" [string repeat v $qbsize]] ""]
+ $rr1 flush
+ # Wait for qbuff to be as expected
+ wait_for_condition 200 10 {
+ [client_field qbuf-client qbuf] == $qbsize
+ } else {
+ fail "Failed to fill qbuf for test"
+ }
+
+        # Make the other two obuf-clients exceed the obuf limit and also exceed maxmemory-clients.
+        # We use two obuf-clients to make sure that even if client eviction is attempted
+        # between the processing of the two commands (with no sleep in between), no client
+        # eviction is performed, because the obuf limit is enforced with precedence.
+ pause_process $server_pid
+ $rr2 get k
+ $rr2 flush
+ $rr3 get k
+ $rr3 flush
+ resume_process $server_pid
+ r ping ;# make sure a full event loop cycle is processed before issuing CLIENT LIST
+
+ # Validate obuf-clients were disconnected (because of obuf limit)
+ catch {client_field obuf-client1 name} e
+ assert_match {no client named obuf-client1 found*} $e
+ catch {client_field obuf-client2 name} e
+ assert_match {no client named obuf-client2 found*} $e
+
+ # Validate qbuf-client is still connected and wasn't evicted
+ assert_equal [client_field qbuf-client name] {qbuf-client}
+
+ $rr1 close
+ $rr2 close
+ $rr3 close
+ }
+}
+
+start_server {} {
+ test "decrease maxmemory-clients causes client eviction" {
+ set maxmemory_clients [mb 4]
+ set client_count 10
+ set qbsize [expr ($maxmemory_clients - [mb 1]) / $client_count]
+ r config set maxmemory-clients $maxmemory_clients
+
+ # Make multiple clients consume together roughly 1mb less than maxmemory_clients
+ set rrs {}
+ for {set j 0} {$j < $client_count} {incr j} {
+ set rr [redis_client]
+ lappend rrs $rr
+ $rr client setname client$j
+ $rr write [join [list "*2\r\n\$$qbsize\r\n" [string repeat v $qbsize]] ""]
+ $rr flush
+ wait_for_condition 200 10 {
+ [client_field client$j qbuf] >= $qbsize
+ } else {
+ fail "Failed to fill qbuf for test"
+ }
+ }
+
+ # Make sure all clients are still connected
+ set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]]
+ assert {$connected_clients == $client_count}
+
+ # Decrease maxmemory_clients and expect client eviction
+ r config set maxmemory-clients [expr $maxmemory_clients / 2]
+ set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]]
+ assert {$connected_clients > 0 && $connected_clients < $client_count}
+
+ foreach rr $rrs {$rr close}
+ }
+}
+
+start_server {} {
+ test "evict clients only until below limit" {
+ set client_count 10
+ set client_mem [mb 1]
+ r debug replybuffer resizing 0
+ r config set maxmemory-clients 0
+ r client setname control
+ r client no-evict on
+
+        # Make multiple clients each consume roughly 1mb of query buffer
+ set total_client_mem 0
+ set max_client_mem 0
+ set rrs {}
+ for {set j 0} {$j < $client_count} {incr j} {
+ set rr [redis_client]
+ lappend rrs $rr
+ $rr client setname client$j
+ $rr write [join [list "*2\r\n\$$client_mem\r\n" [string repeat v $client_mem]] ""]
+ $rr flush
+ wait_for_condition 200 10 {
+ [client_field client$j tot-mem] >= $client_mem
+ } else {
+ fail "Failed to fill qbuf for test"
+ }
+ # In theory all these clients should use the same amount of memory (~1mb). But in practice
+ # some allocators (libc) can return different allocation sizes for the same malloc argument causing
+ # some clients to use slightly more memory than others. We find the largest client and make sure
+ # all clients are roughly the same size (+-1%). Then we can safely set the client eviction limit and
+ # expect consistent results in the test.
+ set cmem [client_field client$j tot-mem]
+ if {$max_client_mem > 0} {
+ set size_ratio [expr $max_client_mem.0/$cmem.0]
+ assert_range $size_ratio 0.99 1.01
+ }
+ if {$cmem > $max_client_mem} {
+ set max_client_mem $cmem
+ }
+ }
+
+ # Make sure all clients are still connected
+ set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]]
+ assert {$connected_clients == $client_count}
+
+ # Set maxmemory-clients to accommodate half our clients (taking into account the control client)
+ set maxmemory_clients [expr ($max_client_mem * $client_count) / 2 + [client_field control tot-mem]]
+ r config set maxmemory-clients $maxmemory_clients
+
+ # Make sure total used memory is below maxmemory_clients
+ set total_client_mem [clients_sum tot-mem]
+ assert {$total_client_mem <= $maxmemory_clients}
+
+ # Make sure we have only half of our clients now
+ set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]]
+ assert {$connected_clients == [expr $client_count / 2]}
+
+ # Restore the reply buffer resize to default
+ r debug replybuffer resizing 1
+
+ foreach rr $rrs {$rr close}
+ } {} {needs:debug}
+}
+
+start_server {} {
+ test "evict clients in right order (large to small)" {
+        # Note that each size step needs to be at least x2 larger than the previous
+        # step because of how the client-eviction size bucketing works
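+        # (presumably the buckets cover power-of-two size ranges, so [kb 128],
+        # [mb 1] and [mb 3] land in three distinct buckets)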
+ set sizes [list [kb 128] [mb 1] [mb 3]]
+ set clients_per_size 3
+ r client setname control
+ r client no-evict on
+ r config set maxmemory-clients 0
+ r debug replybuffer resizing 0
+
+ # Run over all sizes and create some clients using up that size
+        set total_mem 0
+ set rrs {}
+ for {set i 0} {$i < [llength $sizes]} {incr i} {
+ set size [lindex $sizes $i]
+
+ for {set j 0} {$j < $clients_per_size} {incr j} {
+ set rr [redis_client]
+ lappend rrs $rr
+ $rr client setname client-$i
+ $rr write [join [list "*2\r\n\$$size\r\n" [string repeat v $size]] ""]
+ $rr flush
+ }
+ set client_mem [client_field client-$i tot-mem]
+
+ # Update our size list based on actual used up size (this is usually
+ # slightly more than expected because of allocator bins
+ assert {$client_mem >= $size}
+ set sizes [lreplace $sizes $i $i $client_mem]
+
+ # Account total client memory usage
+ incr total_mem [expr $clients_per_size * $client_mem]
+ }
+
+ # Make sure all clients are connected
+ set clients [split [string trim [r client list]] "\r\n"]
+ for {set i 0} {$i < [llength $sizes]} {incr i} {
+ assert_equal [llength [lsearch -all $clients "*name=client-$i *"]] $clients_per_size
+ }
+
+ # For each size reduce maxmemory-clients so relevant clients should be evicted
+ # do this from largest to smallest
+ foreach size [lreverse $sizes] {
+ set control_mem [client_field control tot-mem]
+ set total_mem [expr $total_mem - $clients_per_size * $size]
+ r config set maxmemory-clients [expr $total_mem + $control_mem]
+ set clients [split [string trim [r client list]] "\r\n"]
+ # Verify only relevant clients were evicted
+ for {set i 0} {$i < [llength $sizes]} {incr i} {
+ set verify_size [lindex $sizes $i]
+ set count [llength [lsearch -all $clients "*name=client-$i *"]]
+ if {$verify_size < $size} {
+ assert_equal $count $clients_per_size
+ } else {
+ assert_equal $count 0
+ }
+ }
+ }
+
+ # Restore the reply buffer resize to default
+ r debug replybuffer resizing 1
+
+ foreach rr $rrs {$rr close}
+ } {} {needs:debug}
+}
+
+start_server {} {
+ foreach type {"client no-evict" "maxmemory-clients disabled"} {
+ r flushall
+ r client no-evict on
+ r config set maxmemory-clients 0
+
+ test "client total memory grows during $type" {
+ r setrange k [mb 1] v
+ set rr [redis_client]
+ $rr client setname test_client
+ if {$type eq "client no-evict"} {
+ $rr client no-evict on
+ r config set maxmemory-clients 1
+ }
+ $rr deferred 1
+
+            # Fill the output buffer in a loop without reading it and make sure
+            # the client's tot-mem has increased (OS buffers didn't swallow it)
+            # and that no eviction occurs.
+ while {true} {
+ $rr get k
+ $rr flush
+ after 10
+ if {[client_field test_client tot-mem] > [mb 10]} {
+ break
+ }
+ }
+
+ # Trigger the client eviction, by flipping the no-evict flag to off
+ if {$type eq "client no-evict"} {
+ $rr client no-evict off
+ } else {
+ r config set maxmemory-clients 1
+ }
+
+ # wait for the client to be disconnected
+ wait_for_condition 5000 50 {
+ ![client_exists test_client]
+ } else {
+ puts [r client list]
+ fail "client was not disconnected"
+ }
+ $rr close
+ }
+ }
+}
+
+} ;# tags
+
diff --git a/tests/unit/cluster/announced-endpoints.tcl b/tests/unit/cluster/announced-endpoints.tcl
new file mode 100644
index 0000000..941a8e0
--- /dev/null
+++ b/tests/unit/cluster/announced-endpoints.tcl
@@ -0,0 +1,42 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+
+ test "Test change cluster-announce-port and cluster-announce-tls-port at runtime" {
+ set baseport [lindex [R 0 config get port] 1]
+        set count [expr [llength $::servers] + 1]
+ set used_port [find_available_port $baseport $count]
+
+ R 0 config set cluster-announce-tls-port $used_port
+ R 0 config set cluster-announce-port $used_port
+
+ assert_match "*:$used_port@*" [R 0 CLUSTER NODES]
+ wait_for_condition 50 100 {
+ [string match "*:$used_port@*" [R 1 CLUSTER NODES]]
+ } else {
+ fail "Cluster announced port was not propagated via gossip"
+ }
+
+ R 0 config set cluster-announce-tls-port 0
+ R 0 config set cluster-announce-port 0
+ assert_match "*:$baseport@*" [R 0 CLUSTER NODES]
+ }
+
+ test "Test change cluster-announce-bus-port at runtime" {
+ set baseport [lindex [R 0 config get port] 1]
+        set count [expr [llength $::servers] + 1]
+ set used_port [find_available_port $baseport $count]
+
+ # Verify config set cluster-announce-bus-port
+ R 0 config set cluster-announce-bus-port $used_port
+ assert_match "*@$used_port *" [R 0 CLUSTER NODES]
+ wait_for_condition 50 100 {
+ [string match "*@$used_port *" [R 1 CLUSTER NODES]]
+ } else {
+ fail "Cluster announced port was not propagated via gossip"
+ }
+
+        # Verify that the default cluster bus port (client port + 10000) is restored
+ set base_bus_port [expr $baseport + 10000]
+ R 0 config set cluster-announce-bus-port 0
+ assert_match "*@$base_bus_port *" [R 0 CLUSTER NODES]
+ }
+}
diff --git a/tests/unit/cluster/cli.tcl b/tests/unit/cluster/cli.tcl
new file mode 100644
index 0000000..76e9721
--- /dev/null
+++ b/tests/unit/cluster/cli.tcl
@@ -0,0 +1,416 @@
+# Primitive tests on cluster-enabled redis using redis-cli
+
+source tests/support/cli.tcl
+
+# make sure the test infra won't use SELECT
+set old_singledb $::singledb
+set ::singledb 1
+
+# cluster creation is complicated with TLS, and the current tests don't really need that coverage
+tags {tls:skip external:skip cluster} {
+
+# start three servers
+set base_conf [list cluster-enabled yes cluster-node-timeout 1000]
+start_multiple_servers 3 [list overrides $base_conf] {
+
+ set node1 [srv 0 client]
+ set node2 [srv -1 client]
+ set node3 [srv -2 client]
+ set node3_pid [srv -2 pid]
+ set node3_rd [redis_deferring_client -2]
+
+ test {Create 3 node cluster} {
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+ }
+
+ test "Run blocking command on cluster node3" {
+ # key9184688 is mapped to slot 10923 (first slot of node 3)
+ $node3_rd brpop key9184688 0
+ $node3_rd flush
+
+ wait_for_condition 50 100 {
+ [s -2 blocked_clients] eq {1}
+ } else {
+ fail "Client not blocked"
+ }
+ }
+
+ test "Perform a Resharding" {
+ exec src/redis-cli --cluster-yes --cluster reshard 127.0.0.1:[srv -2 port] \
+ --cluster-to [$node1 cluster myid] \
+ --cluster-from [$node3 cluster myid] \
+ --cluster-slots 1
+ }
+
+ test "Verify command got unblocked after resharding" {
+ # this (read) will wait for the node3 to realize the new topology
+ assert_error {*MOVED*} {$node3_rd read}
+
+ # verify there are no blocked clients
+ assert_equal [s 0 blocked_clients] {0}
+ assert_equal [s -1 blocked_clients] {0}
+ assert_equal [s -2 blocked_clients] {0}
+ }
+
+ test "Wait for cluster to be stable" {
+        # Cluster check just verifies that the config state is self-consistent.
+        # Waiting for cluster_state to be ok is an independent check that all the
+        # nodes actually believe each other to be healthy, preventing cluster-down errors.
+ wait_for_condition 1000 50 {
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv 0 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -1 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -2 port]}] == 0 &&
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+ }
+
+ set node1_rd [redis_deferring_client 0]
+
+ test "use previous hostip in \"cluster-preferred-endpoint-type unknown-endpoint\" mode" {
+
+ # backup and set cluster-preferred-endpoint-type unknown-endpoint
+ set endpoint_type_before_set [lindex [split [$node1 CONFIG GET cluster-preferred-endpoint-type] " "] 1]
+ $node1 CONFIG SET cluster-preferred-endpoint-type unknown-endpoint
+
+        # when redis-cli is not in cluster mode, the server returns MOVED with an empty host
+ set slot_for_foo [$node1 CLUSTER KEYSLOT foo]
+ assert_error "*MOVED $slot_for_foo :*" {$node1 set foo bar}
+
+        # when in cluster mode, redirect using the previously seen host IP
+ assert_equal "[exec src/redis-cli -h 127.0.0.1 -p [srv 0 port] -c set foo bar]" {OK}
+ assert_match "[exec src/redis-cli -h 127.0.0.1 -p [srv 0 port] -c get foo]" {bar}
+
+ assert_equal [$node1 CONFIG SET cluster-preferred-endpoint-type "$endpoint_type_before_set"] {OK}
+ }
+
+ test "Sanity test push cmd after resharding" {
+ assert_error {*MOVED*} {$node3 lpush key9184688 v1}
+
+ $node1_rd brpop key9184688 0
+ $node1_rd flush
+
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ puts "Client not blocked"
+ puts "read from blocked client: [$node1_rd read]"
+ fail "Client not blocked"
+ }
+
+ $node1 lpush key9184688 v2
+ assert_equal {key9184688 v2} [$node1_rd read]
+ }
+
+ $node3_rd close
+
+ test "Run blocking command again on cluster node1" {
+ $node1 del key9184688
+ # key9184688 is mapped to slot 10923 which has been moved to node1
+ $node1_rd brpop key9184688 0
+ $node1_rd flush
+
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Client not blocked"
+ }
+ }
+
+ test "Kill a cluster node and wait for fail state" {
+ # kill node3 in cluster
+ pause_process $node3_pid
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {fail} &&
+ [CI 1 cluster_state] eq {fail}
+ } else {
+ fail "Cluster doesn't fail"
+ }
+ }
+
+ test "Verify command got unblocked after cluster failure" {
+ assert_error {*CLUSTERDOWN*} {$node1_rd read}
+
+ # verify there are no blocked clients
+ assert_equal [s 0 blocked_clients] {0}
+ assert_equal [s -1 blocked_clients] {0}
+ }
+
+ resume_process $node3_pid
+ $node1_rd close
+
+} ;# stop servers
+
+# Test redis-cli --cluster create, add-node, call.
+# Test that functions are propagated on add-node
+start_multiple_servers 5 [list overrides $base_conf] {
+
+ set node4_rd [redis_client -3]
+ set node5_rd [redis_client -4]
+
+ test {Functions are added to new node on redis-cli cluster add-node} {
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+        # upload a function to the entire cluster
+ exec src/redis-cli --cluster-yes --cluster call 127.0.0.1:[srv 0 port] \
+ FUNCTION LOAD {#!lua name=TEST
+ redis.register_function('test', function() return 'hello' end)
+ }
+
+        # adding a node to the cluster
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_cluster_size 4
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # make sure 'test' function was added to the new node
+ assert_equal {{library_name TEST engine LUA functions {{name test description {} flags {}}}}} [$node4_rd FUNCTION LIST]
+
+ # add function to node 5
+ assert_equal {TEST} [$node5_rd FUNCTION LOAD {#!lua name=TEST
+ redis.register_function('test', function() return 'hello' end)
+ }]
+
+        # make sure the function was added to node 5
+ assert_equal {{library_name TEST engine LUA functions {{name test description {} flags {}}}}} [$node5_rd FUNCTION LIST]
+
+        # adding node 5 to the cluster should fail because it already contains the 'test' function
+ catch {
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -4 port] \
+ 127.0.0.1:[srv 0 port]
+ } e
+ assert_match {*node already contains functions*} $e
+ }
+} ;# stop servers
+
+# Test redis-cli --cluster create, add-node.
+# Test that one slot can be migrated to and then away from the new node.
+test {Migrate the last slot away from a node using redis-cli} {
+ start_multiple_servers 4 [list overrides $base_conf] {
+
+ # Create a cluster of 3 nodes
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Insert some data
+ assert_equal OK [exec src/redis-cli -c -p [srv 0 port] SET foo bar]
+ set slot [exec src/redis-cli -c -p [srv 0 port] CLUSTER KEYSLOT foo]
+
+ # Add new node to the cluster
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+        # First we wait for the new node to be recognized by the entire cluster
+ wait_for_cluster_size 4
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ set newnode_r [redis_client -3]
+ set newnode_id [$newnode_r CLUSTER MYID]
+
+ # Find out which node has the key "foo" by asking the new node for a
+ # redirect.
+ catch { $newnode_r get foo } e
+ assert_match "MOVED $slot *" $e
+ lassign [split [lindex $e 2] :] owner_host owner_port
+ set owner_r [redis $owner_host $owner_port 0 $::tls]
+ set owner_id [$owner_r CLUSTER MYID]
+
+ # Move slot to new node using plain Redis commands
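+        # (the standard manual procedure: IMPORTING on the target, MIGRATING on
+        # the source, MIGRATE the keys, then SETSLOT ... NODE on both ends)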
+ assert_equal OK [$newnode_r CLUSTER SETSLOT $slot IMPORTING $owner_id]
+ assert_equal OK [$owner_r CLUSTER SETSLOT $slot MIGRATING $newnode_id]
+ assert_equal {foo} [$owner_r CLUSTER GETKEYSINSLOT $slot 10]
+ assert_equal OK [$owner_r MIGRATE 127.0.0.1 [srv -3 port] "" 0 5000 KEYS foo]
+ assert_equal OK [$newnode_r CLUSTER SETSLOT $slot NODE $newnode_id]
+ assert_equal OK [$owner_r CLUSTER SETSLOT $slot NODE $newnode_id]
+
+        # Using --cluster check, make sure we won't get `Not all slots are covered by nodes`.
+        # Wait for the cluster to become stable and make sure the cluster is up during MIGRATE.
+ wait_for_condition 1000 50 {
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv 0 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -1 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -2 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -3 port]}] == 0 &&
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Move the only slot back to original node using redis-cli
+ exec src/redis-cli --cluster reshard 127.0.0.1:[srv -3 port] \
+ --cluster-from $newnode_id \
+ --cluster-to $owner_id \
+ --cluster-slots 1 \
+ --cluster-yes
+
+ # The empty node will become a replica of the new owner before the
+ # `MOVED` check, so let's wait for the cluster to become stable.
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Check that the key foo has been migrated back to the original owner.
+ catch { $newnode_r get foo } e
+ assert_equal "MOVED $slot $owner_host:$owner_port" $e
+
+ # Check that the empty node has turned itself into a replica of the new
+ # owner and that the new owner knows that.
+ wait_for_condition 1000 50 {
+ [string match "*slave*" [$owner_r CLUSTER REPLICAS $owner_id]]
+ } else {
+ fail "Empty node didn't turn itself into a replica."
+ }
+ }
+}
+
+# Test redis-cli --cluster create, add-node with cluster-port.
+# Create five nodes, three with custom cluster_port and two with default values.
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1 cluster-port [find_available_port $::baseport $::portcount]]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1 cluster-port [find_available_port $::baseport $::portcount]]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1]] {
+start_server [list overrides [list cluster-enabled yes cluster-node-timeout 1 cluster-port [find_available_port $::baseport $::portcount]]] {
+
+ # The first three are used to test --cluster create.
+ # The last two are used to test --cluster add-node
+ set node1_rd [redis_client 0]
+ set node2_rd [redis_client -1]
+ set node3_rd [redis_client -2]
+ set node4_rd [redis_client -3]
+ set node5_rd [redis_client -4]
+
+ test {redis-cli --cluster create with cluster-port} {
+ exec src/redis-cli --cluster-yes --cluster create \
+ 127.0.0.1:[srv 0 port] \
+ 127.0.0.1:[srv -1 port] \
+ 127.0.0.1:[srv -2 port]
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Make sure each node can meet other nodes
+ assert_equal 3 [CI 0 cluster_known_nodes]
+ assert_equal 3 [CI 1 cluster_known_nodes]
+ assert_equal 3 [CI 2 cluster_known_nodes]
+ }
+
+ test {redis-cli --cluster add-node with cluster-port} {
+        # Adding a node to the cluster (without cluster-port)
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -3 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_cluster_size 4
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+        # Adding a node to the cluster (with cluster-port)
+ exec src/redis-cli --cluster-yes --cluster add-node \
+ 127.0.0.1:[srv -4 port] \
+ 127.0.0.1:[srv 0 port]
+
+ wait_for_cluster_size 5
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok} &&
+ [CI 3 cluster_state] eq {ok} &&
+ [CI 4 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+
+ # Make sure each node can meet other nodes
+ assert_equal 5 [CI 0 cluster_known_nodes]
+ assert_equal 5 [CI 1 cluster_known_nodes]
+ assert_equal 5 [CI 2 cluster_known_nodes]
+ assert_equal 5 [CI 3 cluster_known_nodes]
+ assert_equal 5 [CI 4 cluster_known_nodes]
+ }
+# stop 5 servers
+}
+}
+}
+}
+}
+
+} ;# tags
+
+set ::singledb $old_singledb
diff --git a/tests/unit/cluster/cluster-response-tls.tcl b/tests/unit/cluster/cluster-response-tls.tcl
new file mode 100644
index 0000000..a099fa7
--- /dev/null
+++ b/tests/unit/cluster/cluster-response-tls.tcl
@@ -0,0 +1,110 @@
+source tests/support/cluster.tcl
+
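+# Extract the port from a redirect error. For example:
+#   get_port_from_moved_error "MOVED 3999 127.0.0.1:6381"  ;# -> 6381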
+proc get_port_from_moved_error {e} {
+ set ip_port [lindex [split $e " "] 2]
+ return [lindex [split $ip_port ":"] 1]
+}
+
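+# Given the TCP port of one of the started servers, return that server's
+# "pport" as recorded in ::servers (the companion plaintext port used when
+# the suite runs over TLS).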
+proc get_pport_by_port {port} {
+ foreach srv $::servers {
+ set srv_port [dict get $srv port]
+ if {$port == $srv_port} {
+ return [dict get $srv pport]
+ }
+ }
+ return 0
+}
+
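+# Parse the client port out of a single CLUSTER NODES line; the address
+# (second field) has the form "ip:port@cport".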
+proc get_port_from_node_info {line} {
+ set fields [split $line " "]
+ set addr [lindex $fields 1]
+ set ip_port [lindex [split $addr "@"] 0]
+ return [lindex [split $ip_port ":"] 1]
+}
+
+proc cluster_response_tls {tls_cluster} {
+
+ test "CLUSTER SLOTS with different connection type -- tls-cluster $tls_cluster" {
+ set slots1 [R 0 cluster slots]
+ set pport [srv 0 pport]
+ set cluster_client [redis_cluster 127.0.0.1:$pport 0]
+ set slots2 [$cluster_client cluster slots]
+ $cluster_client close
+ # Compare the ports in the first row
+ assert_no_match [lindex $slots1 0 2 1] [lindex $slots2 0 2 1]
+ }
+
+ test "CLUSTER NODES return port according to connection type -- tls-cluster $tls_cluster" {
+ set nodes [R 0 cluster nodes]
+ set port1 [get_port_from_node_info [lindex [split $nodes "\r\n"] 0]]
+ set pport [srv 0 pport]
+ set cluster_client [redis_cluster 127.0.0.1:$pport 0]
+ set nodes [$cluster_client cluster nodes]
+ set port2 [get_port_from_node_info [lindex [split $nodes "\r\n"] 0]]
+ $cluster_client close
+ assert_not_equal $port1 $port2
+ }
+
+ set cluster [redis_cluster 127.0.0.1:[srv 0 port]]
+ set cluster_pport [redis_cluster 127.0.0.1:[srv 0 pport] 0]
+ $cluster refresh_nodes_map
+
+ test "Set many keys in the cluster -- tls-cluster $tls_cluster" {
+ for {set i 0} {$i < 5000} {incr i} {
+ $cluster set $i $i
+ assert { [$cluster get $i] eq $i }
+ }
+ }
+
+ test "Test cluster responses during migration of slot x -- tls-cluster $tls_cluster" {
+ set slot 10
+ array set nodefrom [$cluster masternode_for_slot $slot]
+ array set nodeto [$cluster masternode_notfor_slot $slot]
+ $nodeto(link) cluster setslot $slot importing $nodefrom(id)
+ $nodefrom(link) cluster setslot $slot migrating $nodeto(id)
+
+ # Get a key from that slot
+ set key [$nodefrom(link) cluster GETKEYSINSLOT $slot "1"]
+ # MOVED REPLY
+ catch {$nodeto(link) set $key "newVal"} e_moved1
+ assert_match "*MOVED*" $e_moved1
+ # ASK REPLY
+ catch {$nodefrom(link) set "abc{$key}" "newVal"} e_ask1
+ assert_match "*ASK*" $e_ask1
+
+ # UNSTABLE REPLY
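+        # (a multi-key command on a migrating slot, where some keys are missing
+        # locally, is answered with TRYAGAIN)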
+ assert_error "*TRYAGAIN*" {$nodefrom(link) mset "a{$key}" "newVal" $key "newVal2"}
+
+ # Connecting using another protocol
+ array set nodefrom_pport [$cluster_pport masternode_for_slot $slot]
+ array set nodeto_pport [$cluster_pport masternode_notfor_slot $slot]
+
+ # MOVED REPLY
+ catch {$nodeto_pport(link) set $key "newVal"} e_moved2
+ assert_match "*MOVED*" $e_moved2
+ # ASK REPLY
+ catch {$nodefrom_pport(link) set "abc{$key}" "newVal"} e_ask2
+ assert_match "*ASK*" $e_ask2
+ # Compare MOVED error's port
+ set port1 [get_port_from_moved_error $e_moved1]
+ set port2 [get_port_from_moved_error $e_moved2]
+ assert_not_equal $port1 $port2
+ assert_equal $port1 $nodefrom(port)
+ assert_equal $port2 [get_pport_by_port $nodefrom(port)]
+ # Compare ASK error's port
+ set port1 [get_port_from_moved_error $e_ask1]
+ set port2 [get_port_from_moved_error $e_ask2]
+ assert_not_equal $port1 $port2
+ assert_equal $port1 $nodeto(port)
+ assert_equal $port2 [get_pport_by_port $nodeto(port)]
+ }
+}
+
+if {$::tls} {
+ start_cluster 3 3 {tags {external:skip cluster tls} overrides {tls-cluster yes tls-replication yes}} {
+ cluster_response_tls yes
+ }
+ start_cluster 3 3 {tags {external:skip cluster tls} overrides {tls-cluster no tls-replication no}} {
+ cluster_response_tls no
+ }
+}
diff --git a/tests/unit/cluster/hostnames.tcl b/tests/unit/cluster/hostnames.tcl
new file mode 100644
index 0000000..f318240
--- /dev/null
+++ b/tests/unit/cluster/hostnames.tcl
@@ -0,0 +1,203 @@
+proc get_slot_field {slot_output shard_id node_id attrib_id} {
+ return [lindex [lindex [lindex $slot_output $shard_id] $node_id] $attrib_id]
+}
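+# e.g. [get_slot_field $slots 0 2 0] returns the endpoint of the master entry
+# (node entries start at index 2) of the first slot range in a CLUSTER SLOTS reply.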
+
+# Start a cluster with 3 masters and 4 replicas.
+# These tests rely on specific node ordering, so make sure no node fails over.
+start_cluster 3 4 {tags {external:skip cluster} overrides {cluster-replica-no-failover yes}} {
+test "Set cluster hostnames and verify they are propagated" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname "host-$j.com"
+ }
+
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated "host-*.com"] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+
+ # Now that everything is propagated, assert everyone agrees
+ wait_for_cluster_propagation
+}
+
+test "Update hostnames and make sure they are all eventually propagated" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname "host-updated-$j.com"
+ }
+
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated "host-updated-*.com"] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+
+ # Now that everything is propagated, assert everyone agrees
+ wait_for_cluster_propagation
+}
+
+test "Remove hostnames and make sure they are all eventually propagated" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname ""
+ }
+
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated ""] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+
+ # Now that everything is propagated, assert everyone agrees
+ wait_for_cluster_propagation
+}
+
+test "Verify cluster-preferred-endpoint-type behavior for redirects and info" {
+ R 0 config set cluster-announce-hostname "me.com"
+ R 1 config set cluster-announce-hostname ""
+ R 2 config set cluster-announce-hostname "them.com"
+
+ wait_for_cluster_propagation
+
+ # Verify default behavior
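+    # (with the default "ip" endpoint type, the hostname is only exposed in the
+    # metadata map, i.e. field 3 of each node entry)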
+ set slot_result [R 0 cluster slots]
+ assert_equal "" [lindex [get_slot_field $slot_result 0 2 0] 1]
+ assert_equal "" [lindex [get_slot_field $slot_result 2 2 0] 1]
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 0 2 3] 0]
+ assert_equal "me.com" [lindex [get_slot_field $slot_result 0 2 3] 1]
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 2 2 3] 0]
+ assert_equal "them.com" [lindex [get_slot_field $slot_result 2 2 3] 1]
+
+ # Redirect will use the IP address
+ catch {R 0 set foo foo} redir_err
+ assert_match "MOVED * 127.0.0.1:*" $redir_err
+
+ # Verify prefer hostname behavior
+ R 0 config set cluster-preferred-endpoint-type hostname
+
+ set slot_result [R 0 cluster slots]
+ assert_equal "me.com" [get_slot_field $slot_result 0 2 0]
+ assert_equal "them.com" [get_slot_field $slot_result 2 2 0]
+
+ # Redirect should use hostname
+ catch {R 0 set foo foo} redir_err
+ assert_match "MOVED * them.com:*" $redir_err
+
+ # Redirect to an unknown hostname returns ?
+ catch {R 0 set barfoo bar} redir_err
+ assert_match "MOVED * ?:*" $redir_err
+
+ # Verify unknown hostname behavior
+ R 0 config set cluster-preferred-endpoint-type unknown-endpoint
+
+ # Verify default behavior
+ set slot_result [R 0 cluster slots]
+ assert_equal "ip" [lindex [get_slot_field $slot_result 0 2 3] 0]
+ assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 0 2 3] 1]
+ assert_equal "ip" [lindex [get_slot_field $slot_result 2 2 3] 0]
+ assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 2 2 3] 1]
+ assert_equal "ip" [lindex [get_slot_field $slot_result 1 2 3] 0]
+ assert_equal "127.0.0.1" [lindex [get_slot_field $slot_result 1 2 3] 1]
+ # Not required by the protocol, but IP comes before hostname
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 0 2 3] 2]
+ assert_equal "me.com" [lindex [get_slot_field $slot_result 0 2 3] 3]
+ assert_equal "hostname" [lindex [get_slot_field $slot_result 2 2 3] 2]
+ assert_equal "them.com" [lindex [get_slot_field $slot_result 2 2 3] 3]
+
+ # This node doesn't have a hostname
+ assert_equal 2 [llength [get_slot_field $slot_result 1 2 3]]
+
+ # Redirect should use empty string
+ catch {R 0 set foo foo} redir_err
+ assert_match "MOVED * :*" $redir_err
+
+ R 0 config set cluster-preferred-endpoint-type ip
+}
+
+test "Verify the nodes configured with prefer hostname only show hostname for new nodes" {
+ # Have everyone forget node 6 and isolate it from the cluster.
+ isolate_node 6
+
+ # Set hostnames for the masters, now that the node is isolated
+ R 0 config set cluster-announce-hostname "shard-1.com"
+ R 1 config set cluster-announce-hostname "shard-2.com"
+ R 2 config set cluster-announce-hostname "shard-3.com"
+
+    # Prevent Node 0 and Node 6 from properly meeting;
+    # they'll hang in the handshake phase. This allows us to
+    # test the case where we "know" about a node but haven't
+    # successfully retrieved information about it yet.
+ R 0 DEBUG DROP-CLUSTER-PACKET-FILTER 0
+ R 6 DEBUG DROP-CLUSTER-PACKET-FILTER 0
+
+ # Have a replica meet the isolated node
+ R 3 cluster meet 127.0.0.1 [srv -6 port]
+
+ # Wait for the isolated node to learn about the rest of the cluster,
+    # each of which corresponds to a single entry in cluster nodes. Note this
+ # doesn't mean the isolated node has successfully contacted each
+ # node.
+ wait_for_condition 50 100 {
+ [llength [split [R 6 CLUSTER NODES] "\n"]] eq [expr [llength $::servers] + 1]
+ } else {
+ fail "Isolated node didn't learn about the rest of the cluster *"
+ }
+
+    # Now, we wait until the two nodes that aren't filtering packets
+    # accept our isolated node's connections. At this point they will
+    # start showing up in cluster slots.
+ wait_for_condition 50 100 {
+ [llength [R 6 CLUSTER SLOTS]] eq 2
+ } else {
+ fail "Node did not learn about the 2 shards it can talk to"
+ }
+ set slot_result [R 6 CLUSTER SLOTS]
+ assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-2.com"
+ assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-3.com"
+
+    # Also make sure we know about the isolated master; we
+    # just can't reach it.
+ set master_id [R 0 CLUSTER MYID]
+ assert_match "*$master_id*" [R 6 CLUSTER NODES]
+
+ # Stop dropping cluster packets, and make sure everything
+ # stabilizes
+ R 0 DEBUG DROP-CLUSTER-PACKET-FILTER -1
+ R 6 DEBUG DROP-CLUSTER-PACKET-FILTER -1
+
+    # This operation sometimes takes around 5 seconds to resolve the state,
+ # so it has a higher timeout.
+ wait_for_condition 50 500 {
+ [llength [R 6 CLUSTER SLOTS]] eq 3
+ } else {
+ fail "Node did not learn about the 2 shards it can talk to"
+ }
+ set slot_result [R 6 CLUSTER SLOTS]
+ assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "shard-1.com"
+ assert_equal [lindex [get_slot_field $slot_result 1 2 3] 1] "shard-2.com"
+ assert_equal [lindex [get_slot_field $slot_result 2 2 3] 1] "shard-3.com"
+}
+
+test "Test restart will keep hostname information" {
+ # Set a new hostname, reboot and make sure it sticks
+ R 0 config set cluster-announce-hostname "restart-1.com"
+
+ # Store the hostname in the config
+ R 0 config rewrite
+
+ restart_server 0 true false
+ set slot_result [R 0 CLUSTER SLOTS]
+ assert_equal [lindex [get_slot_field $slot_result 0 2 3] 1] "restart-1.com"
+
+ # As a sanity check, make sure everyone eventually agrees
+ wait_for_cluster_propagation
+}
+
+test "Test hostname validation" {
+ catch {R 0 config set cluster-announce-hostname [string repeat x 256]} err
+ assert_match "*Hostnames must be less than 256 characters*" $err
+ catch {R 0 config set cluster-announce-hostname "?.com"} err
+ assert_match "*Hostnames may only contain alphanumeric characters, hyphens or dots*" $err
+
+ # Note this isn't a valid hostname, but it passes our internal validation
+ R 0 config set cluster-announce-hostname "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-."
+}
+}
diff --git a/tests/unit/cluster/human-announced-nodename.tcl b/tests/unit/cluster/human-announced-nodename.tcl
new file mode 100644
index 0000000..a595ca6
--- /dev/null
+++ b/tests/unit/cluster/human-announced-nodename.tcl
@@ -0,0 +1,29 @@
+# Check that the cluster's view of the human announced nodename is reported in logs
+start_cluster 3 0 {tags {external:skip cluster}} {
+ test "Set cluster human announced nodename and let it propagate" {
+ for {set j 0} {$j < [llength $::servers]} {incr j} {
+ R $j config set cluster-announce-hostname "host-$j.com"
+ R $j config set cluster-announce-human-nodename "nodename-$j"
+ }
+
+ # We wait for everyone to agree on the hostnames. Since they are gossiped
+ # the same way as nodenames, it implies everyone knows the nodenames too.
+ wait_for_condition 50 100 {
+ [are_hostnames_propagated "host-*.com"] eq 1
+ } else {
+ fail "cluster hostnames were not propagated"
+ }
+ }
+
+ test "Human nodenames are visible in log messages" {
+ # Pause instance 0, so everyone thinks it is dead
+ pause_process [srv 0 pid]
+
+        # We're going to use a message we know will be sent, "node unreachable",
+        # since it includes both the reporting node and the node being reported.
+ wait_for_log_messages -1 {"*Node * (nodename-2) reported node * (nodename-0) as not reachable*"} 0 20 500
+ wait_for_log_messages -2 {"*Node * (nodename-1) reported node * (nodename-0) as not reachable*"} 0 20 500
+
+ resume_process [srv 0 pid]
+ }
+}
diff --git a/tests/unit/cluster/links.tcl b/tests/unit/cluster/links.tcl
new file mode 100644
index 0000000..a202c37
--- /dev/null
+++ b/tests/unit/cluster/links.tcl
@@ -0,0 +1,292 @@
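+# Return all entries in CLUSTER LINKS output by instance identified by `this_instance_id` that
+# correspond to links with the peer identified by `peer_nodename`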
+proc get_links_with_peer {this_instance_id peer_nodename} {
+ set links [R $this_instance_id cluster links]
+ set links_with_peer {}
+ foreach l $links {
+ if {[dict get $l node] eq $peer_nodename} {
+ lappend links_with_peer $l
+ }
+ }
+ return $links_with_peer
+}
+
+# Return the entry in CLUSTER LINKS output by instance identified by `this_instance_id` that
+# corresponds to the link established toward a peer identified by `peer_nodename`
+proc get_link_to_peer {this_instance_id peer_nodename} {
+ set links_with_peer [get_links_with_peer $this_instance_id $peer_nodename]
+ foreach l $links_with_peer {
+ if {[dict get $l direction] eq "to"} {
+ return $l
+ }
+ }
+ return {}
+}
+
+# Return the entry in CLUSTER LINKS output by instance identified by `this_instance_id` that
+# corresponds to the link accepted from a peer identified by `peer_nodename`
+proc get_link_from_peer {this_instance_id peer_nodename} {
+ set links_with_peer [get_links_with_peer $this_instance_id $peer_nodename]
+ foreach l $links_with_peer {
+ if {[dict get $l direction] eq "from"} {
+ return $l
+ }
+ }
+ return {}
+}
+
+# Reset cluster links to their original state
+proc reset_links {id} {
+ set limit [lindex [R $id CONFIG get cluster-link-sendbuf-limit] 1]
+
+ # Set a 1 byte limit and wait for cluster cron to run
+ # (executes every 100ms) and terminate links
+ R $id CONFIG SET cluster-link-sendbuf-limit 1
+ after 150
+
+ # Reset limit
+ R $id CONFIG SET cluster-link-sendbuf-limit $limit
+
+ # Wait until the cluster links come back up for each node
+ wait_for_condition 50 100 {
+ [number_of_links $id] == [expr [number_of_peers $id] * 2]
+ } else {
+ fail "Cluster links did not come back up"
+ }
+}
+
+proc number_of_peers {id} {
+ expr [llength $::servers] - 1
+}
+
+proc number_of_links {id} {
+ llength [R $id cluster links]
+}
+
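+# Publish num_msgs messages, each of msg_size bytes, on "channel" using the
+# given server client. For example:
+#   publish_messages $server 10 10000  ;# ~100KB in total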
+proc publish_messages {server num_msgs msg_size} {
+ for {set i 0} {$i < $num_msgs} {incr i} {
+ $server PUBLISH channel [string repeat "x" $msg_size]
+ }
+}
+
+start_cluster 1 2 {tags {external:skip cluster}} {
+ set primary_id 0
+ set replica1_id 1
+
+ set primary [Rn $primary_id]
+ set replica1 [Rn $replica1_id]
+
+ test "Broadcast message across a cluster shard while a cluster link is down" {
+ set replica1_node_id [$replica1 CLUSTER MYID]
+
+ set channelname ch3
+
+ # subscribe on replica1
+ set subscribeclient1 [redis_deferring_client -1]
+ $subscribeclient1 deferred 1
+ $subscribeclient1 SSUBSCRIBE $channelname
+ $subscribeclient1 read
+
+ # subscribe on replica2
+ set subscribeclient2 [redis_deferring_client -2]
+ $subscribeclient2 deferred 1
+ $subscribeclient2 SSUBSCRIBE $channelname
+ $subscribeclient2 read
+
+ # Verify number of links with cluster stable state
+ assert_equal [expr [number_of_peers $primary_id]*2] [number_of_links $primary_id]
+
+        # Disconnect the cluster link between primary and replica1 and publish a message.
+ $primary MULTI
+ $primary DEBUG CLUSTERLINK KILL TO $replica1_node_id
+ $primary SPUBLISH $channelname hello
+ set res [$primary EXEC]
+
+ # Verify no client exists on the primary to receive the published message.
+ assert_equal $res {OK 0}
+
+        # Wait until all the cluster links are healthy
+ wait_for_condition 50 100 {
+ [number_of_peers $primary_id]*2 == [number_of_links $primary_id]
+ } else {
+ fail "All peer links couldn't be established"
+ }
+
+ # Publish a message afterwards.
+ $primary SPUBLISH $channelname world
+
+        # Verify replica1 received only "world"; "hello" was lost.
+ assert_equal "smessage ch3 world" [$subscribeclient1 read]
+
+ # Verify replica2 has received both messages (hello/world)
+ assert_equal "smessage ch3 hello" [$subscribeclient2 read]
+ assert_equal "smessage ch3 world" [$subscribeclient2 read]
+ } {} {needs:debug}
+}
+
+start_cluster 3 0 {tags {external:skip cluster}} {
+ test "Each node has two links with each peer" {
+ for {set id 0} {$id < [llength $::servers]} {incr id} {
+            # Assert that, from the point of view of each node, there are two links
+            # for each peer. It might take a while for the cluster to stabilize, so
+            # wait up to 5 seconds.
+ wait_for_condition 50 100 {
+ [number_of_peers $id]*2 == [number_of_links $id]
+ } else {
+ assert_equal [expr [number_of_peers $id]*2] [number_of_links $id]
+ }
+
+ set nodes [get_cluster_nodes $id]
+ set links [R $id cluster links]
+
+ # For each peer there should be exactly one
+ # link "to" it and one link "from" it.
+ foreach n $nodes {
+ if {[cluster_has_flag $n myself]} continue
+ set peer [dict get $n id]
+ set to 0
+ set from 0
+ foreach l $links {
+ if {[dict get $l node] eq $peer} {
+ if {[dict get $l direction] eq "to"} {
+ incr to
+ } elseif {[dict get $l direction] eq "from"} {
+ incr from
+ }
+ }
+ }
+ assert {$to eq 1}
+ assert {$from eq 1}
+ }
+ }
+ }
+
+ test {Validate cluster links format} {
+ set lines [R 0 cluster links]
+ foreach l $lines {
+ if {$l eq {}} continue
+ assert_equal [llength $l] 12
+ assert_equal 1 [dict exists $l "direction"]
+ assert_equal 1 [dict exists $l "node"]
+ assert_equal 1 [dict exists $l "create-time"]
+ assert_equal 1 [dict exists $l "events"]
+ assert_equal 1 [dict exists $l "send-buffer-allocated"]
+ assert_equal 1 [dict exists $l "send-buffer-used"]
+ }
+ }
+
+ set primary1_id 0
+ set primary2_id 1
+
+ set primary1 [Rn $primary1_id]
+ set primary2 [Rn $primary2_id]
+
+ test "Disconnect link when send buffer limit reached" {
+ # On primary1, set timeout to 1 hour so links won't get disconnected due to timeouts
+ set oldtimeout [lindex [$primary1 CONFIG get cluster-node-timeout] 1]
+ $primary1 CONFIG set cluster-node-timeout [expr 60*60*1000]
+
+ # Get primary1's links with primary2
+ set primary2_name [dict get [cluster_get_myself $primary2_id] id]
+ set orig_link_p1_to_p2 [get_link_to_peer $primary1_id $primary2_name]
+ set orig_link_p1_from_p2 [get_link_from_peer $primary1_id $primary2_name]
+
+ # On primary1, set cluster link send buffer limit to 256KB, which is large enough to not be
+ # overflowed by regular gossip messages but also small enough that it doesn't take too much
+        # memory to overflow it. If it is set too high, Redis may get OOM killed by the kernel before this
+ # limit is overflowed in some RAM-limited test environments.
+ set oldlimit [lindex [$primary1 CONFIG get cluster-link-sendbuf-limit] 1]
+ $primary1 CONFIG set cluster-link-sendbuf-limit [expr 256*1024]
+ assert {[CI $primary1_id total_cluster_links_buffer_limit_exceeded] eq 0}
+
+ # To manufacture an ever-growing send buffer from primary1 to primary2,
+ # make primary2 unresponsive.
+ set primary2_pid [srv [expr -1*$primary2_id] pid]
+ pause_process $primary2_pid
+
+        # On primary1, send 128KB Pubsub messages in a loop until the send buffer of the link from
+        # primary1 to primary2 exceeds the buffer limit and the link is therefore dropped.
+        # For the send buffer to grow, we first need to exhaust the TCP send buffer of primary1
+        # and the TCP receive buffer of primary2. The sizes of these two buffers vary by OS, but
+        # 100 128KB messages should be sufficient.
+ set i 0
+ wait_for_condition 100 0 {
+ [catch {incr i} e] == 0 &&
+ [catch {$primary1 publish channel [prepare_value [expr 128*1024]]} e] == 0 &&
+ [catch {after 500} e] == 0 &&
+ [CI $primary1_id total_cluster_links_buffer_limit_exceeded] >= 1
+ } else {
+ fail "Cluster link not freed as expected"
+ }
+
+ # A new link to primary2 should have been recreated
+ set new_link_p1_to_p2 [get_link_to_peer $primary1_id $primary2_name]
+ assert {[dict get $new_link_p1_to_p2 create-time] > [dict get $orig_link_p1_to_p2 create-time]}
+
+ # Link from primary2 should not be affected
+ set same_link_p1_from_p2 [get_link_from_peer $primary1_id $primary2_name]
+ assert {[dict get $same_link_p1_from_p2 create-time] eq [dict get $orig_link_p1_from_p2 create-time]}
+
+ # Revive primary2
+ resume_process $primary2_pid
+
+ # Reset configs on primary1 so config changes don't leak out to other tests
+ $primary1 CONFIG set cluster-node-timeout $oldtimeout
+ $primary1 CONFIG set cluster-link-sendbuf-limit $oldlimit
+
+ reset_links $primary1_id
+ }
+
+ test "Link memory increases with publishes" {
+ set server_id 0
+ set server [Rn $server_id]
+ set msg_size 10000
+ set num_msgs 10
+
+ # Remove any sendbuf limit
+ $primary1 CONFIG set cluster-link-sendbuf-limit 0
+
+ # Publish ~100KB to one of the servers
+ $server MULTI
+ $server INFO memory
+ publish_messages $server $num_msgs $msg_size
+ $server INFO memory
+ set res [$server EXEC]
+
+ set link_mem_before_pubs [getInfoProperty $res mem_cluster_links]
+
+ # Remove the first half of the response string which contains the
+ # first "INFO memory" results and search for the property again
+ set res [string range $res [expr [string length $res] / 2] end]
+ set link_mem_after_pubs [getInfoProperty $res mem_cluster_links]
+
+ # We expect the memory to have increased by more than
+        # the cumulative size of the publish messages
+ set mem_diff_floor [expr $msg_size * $num_msgs]
+ set mem_diff [expr $link_mem_after_pubs - $link_mem_before_pubs]
+ assert {$mem_diff > $mem_diff_floor}
+
+ # Reset links to ensure no leftover data for the next test
+ reset_links $server_id
+ }
+
+ test "Link memory resets after publish messages flush" {
+ set server [Rn 0]
+ set msg_size 100000
+ set num_msgs 10
+
+ set link_mem_before [status $server mem_cluster_links]
+
+ # Publish ~1MB to one of the servers
+ $server MULTI
+ publish_messages $server $num_msgs $msg_size
+ $server EXEC
+
+ # Wait until the cluster link memory has returned to below the pre-publish value.
+ # We can't guarantee it returns to the exact same value since gossip messages
+ # can cause the values to fluctuate.
+ wait_for_condition 1000 500 {
+ [status $server mem_cluster_links] <= $link_mem_before
+ } else {
+ fail "Cluster link memory did not settle back to expected range"
+ }
+ }
+}
diff --git a/tests/unit/cluster/misc.tcl b/tests/unit/cluster/misc.tcl
new file mode 100644
index 0000000..cd66697
--- /dev/null
+++ b/tests/unit/cluster/misc.tcl
@@ -0,0 +1,26 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+ test {Key lazy expires during key migration} {
+ R 0 DEBUG SET-ACTIVE-EXPIRE 0
+
+ set key_slot [R 0 CLUSTER KEYSLOT FOO]
+ R 0 set FOO BAR PX 10
+ set src_id [R 0 CLUSTER MYID]
+ set trg_id [R 1 CLUSTER MYID]
+ R 0 CLUSTER SETSLOT $key_slot MIGRATING $trg_id
+ R 1 CLUSTER SETSLOT $key_slot IMPORTING $src_id
+ after 11
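+        # The key has logically expired by now; since the slot is MIGRATING and
+        # the key is missing locally, the source node replies with an ASK redirect.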
+ assert_error {ASK*} {R 0 GET FOO}
+ R 0 ping
+ } {PONG}
+
+ test "Coverage: Basic cluster commands" {
+ assert_equal {OK} [R 0 CLUSTER saveconfig]
+
+ set id [R 0 CLUSTER MYID]
+ assert_equal {0} [R 0 CLUSTER count-failure-reports $id]
+
+ R 0 flushall
+ assert_equal {OK} [R 0 CLUSTER flushslots]
+ }
+}
+
diff --git a/tests/unit/cluster/multi-slot-operations.tcl b/tests/unit/cluster/multi-slot-operations.tcl
new file mode 100644
index 0000000..cc7bb7a
--- /dev/null
+++ b/tests/unit/cluster/multi-slot-operations.tcl
@@ -0,0 +1,109 @@
+# This test file uses a custom slot allocation scheme
+proc cluster_allocate_with_continuous_slots_local {n} {
+ R 0 cluster ADDSLOTSRANGE 0 3276
+ R 1 cluster ADDSLOTSRANGE 3277 6552
+ R 2 cluster ADDSLOTSRANGE 6553 9828
+ R 3 cluster ADDSLOTSRANGE 9829 13104
+ R 4 cluster ADDSLOTSRANGE 13105 16383
+}
+
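+# The proc above is passed to start_cluster as its trailing argument below,
+# overriding the default slot allocation.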
+start_cluster 5 0 {tags {external:skip cluster}} {
+
+set master1 [srv 0 "client"]
+set master2 [srv -1 "client"]
+set master3 [srv -2 "client"]
+set master4 [srv -3 "client"]
+set master5 [srv -4 "client"]
+
+test "Continuous slots distribution" {
+ assert_match "* 0-3276*" [$master1 CLUSTER NODES]
+ assert_match "* 3277-6552*" [$master2 CLUSTER NODES]
+ assert_match "* 6553-9828*" [$master3 CLUSTER NODES]
+ assert_match "* 9829-13104*" [$master4 CLUSTER NODES]
+ assert_match "* 13105-16383*" [$master5 CLUSTER NODES]
+ assert_match "*0 3276*" [$master1 CLUSTER SLOTS]
+ assert_match "*3277 6552*" [$master2 CLUSTER SLOTS]
+ assert_match "*6553 9828*" [$master3 CLUSTER SLOTS]
+ assert_match "*9829 13104*" [$master4 CLUSTER SLOTS]
+ assert_match "*13105 16383*" [$master5 CLUSTER SLOTS]
+
+ $master1 CLUSTER DELSLOTSRANGE 3001 3050
+ assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES]
+ assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS]
+
+ $master2 CLUSTER DELSLOTSRANGE 5001 5500
+ assert_match "* 3277-5000 5501-6552*" [$master2 CLUSTER NODES]
+ assert_match "*3277 5000*5501 6552*" [$master2 CLUSTER SLOTS]
+
+ $master3 CLUSTER DELSLOTSRANGE 7001 7100 8001 8500
+ assert_match "* 6553-7000 7101-8000 8501-9828*" [$master3 CLUSTER NODES]
+ assert_match "*6553 7000*7101 8000*8501 9828*" [$master3 CLUSTER SLOTS]
+
+ $master4 CLUSTER DELSLOTSRANGE 11001 12000 12101 12200
+ assert_match "* 9829-11000 12001-12100 12201-13104*" [$master4 CLUSTER NODES]
+ assert_match "*9829 11000*12001 12100*12201 13104*" [$master4 CLUSTER SLOTS]
+
+ $master5 CLUSTER DELSLOTSRANGE 13501 14000 15001 16000
+ assert_match "* 13105-13500 14001-15000 16001-16383*" [$master5 CLUSTER NODES]
+ assert_match "*13105 13500*14001 15000*16001 16383*" [$master5 CLUSTER SLOTS]
+}
+
+test "ADDSLOTS command with several boundary conditions test suite" {
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTS 3001 aaa}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTS 3001 -1000}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTS 3001 30003}
+
+ assert_error "ERR Slot 3200 is already busy" {R 0 cluster ADDSLOTS 3200}
+ assert_error "ERR Slot 8501 is already busy" {R 0 cluster ADDSLOTS 8501}
+
+ assert_error "ERR Slot 3001 specified multiple times" {R 0 cluster ADDSLOTS 3001 3002 3001}
+}
+
+test "ADDSLOTSRANGE command with several boundary conditions test suite" {
+ # Add multiple slots with incorrect argument number
+ assert_error "ERR wrong number of arguments for 'cluster|addslotsrange' command" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030}
+
+ # Add multiple slots with invalid input slot
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 aaa}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 70000}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 -1000 3030}
+
+ # Add multiple slots when start slot number is greater than the end slot
+ assert_error "ERR start slot number 3030 is greater than end slot number 3025" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 3025}
+
+ # Add multiple slots with busy slot
+ assert_error "ERR Slot 3200 is already busy" {R 0 cluster ADDSLOTSRANGE 3001 3020 3200 3250}
+
+ # Add multiple slots with assigned multiple times
+ assert_error "ERR Slot 3001 specified multiple times" {R 0 cluster ADDSLOTSRANGE 3001 3020 3001 3020}
+}
+
+test "DELSLOTSRANGE command with several boundary conditions test suite" {
+ # Delete multiple slots with incorrect argument number
+ assert_error "ERR wrong number of arguments for 'cluster|delslotsrange' command" {R 0 cluster DELSLOTSRANGE 1000 2000 2100}
+ assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES]
+ assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS]
+
+ # Delete multiple slots with invalid input slot
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 2100 aaa}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 2100 70000}
+ assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 -2100 2200}
+ assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES]
+ assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS]
+
+ # Delete multiple slots when start slot number is greater than the end slot
+ assert_error "ERR start slot number 5800 is greater than end slot number 5750" {R 1 cluster DELSLOTSRANGE 5600 5700 5800 5750}
+ assert_match "* 3277-5000 5501-6552*" [$master2 CLUSTER NODES]
+ assert_match "*3277 5000*5501 6552*" [$master2 CLUSTER SLOTS]
+
+ # Delete multiple slots with already unassigned
+ assert_error "ERR Slot 7001 is already unassigned" {R 2 cluster DELSLOTSRANGE 7001 7100 9000 9200}
+ assert_match "* 6553-7000 7101-8000 8501-9828*" [$master3 CLUSTER NODES]
+ assert_match "*6553 7000*7101 8000*8501 9828*" [$master3 CLUSTER SLOTS]
+
+ # Delete multiple slots with assigned multiple times
+ assert_error "ERR Slot 12500 specified multiple times" {R 3 cluster DELSLOTSRANGE 12500 12600 12500 12600}
+ assert_match "* 9829-11000 12001-12100 12201-13104*" [$master4 CLUSTER NODES]
+ assert_match "*9829 11000*12001 12100*12201 13104*" [$master4 CLUSTER SLOTS]
+}
+} cluster_allocate_with_continuous_slots_local
diff --git a/tests/unit/cluster/scripting.tcl b/tests/unit/cluster/scripting.tcl
new file mode 100644
index 0000000..1ade36e
--- /dev/null
+++ b/tests/unit/cluster/scripting.tcl
@@ -0,0 +1,70 @@
+start_cluster 1 0 {tags {external:skip cluster}} {
+
+ test {Eval scripts with shebangs and functions default to no cross slots} {
+ # Test that scripts with shebang block cross slot operations
+ assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {
+ r 0 eval {#!lua
+ redis.call('set', 'foo', 'bar')
+ redis.call('set', 'bar', 'foo')
+ return 'OK'
+ } 0}
+
+ # Test the functions by default block cross slot operations
+ r 0 function load REPLACE {#!lua name=crossslot
+ local function test_cross_slot(keys, args)
+ redis.call('set', 'foo', 'bar')
+ redis.call('set', 'bar', 'foo')
+ return 'OK'
+ end
+
+ redis.register_function('test_cross_slot', test_cross_slot)}
+ assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {r FCALL test_cross_slot 0}
+ }
+
+ test {Cross slot commands are allowed by default for eval scripts and with allow-cross-slot-keys flag} {
+        # Old style Lua scripts are allowed to perform cross slot operations
+ r 0 eval "redis.call('set', 'foo', 'bar'); redis.call('set', 'bar', 'foo')" 0
+
+ # scripts with allow-cross-slot-keys flag are allowed
+ r 0 eval {#!lua flags=allow-cross-slot-keys
+ redis.call('set', 'foo', 'bar'); redis.call('set', 'bar', 'foo')
+ } 0
+
+ # Functions with allow-cross-slot-keys flag are allowed
+ r 0 function load REPLACE {#!lua name=crossslot
+ local function test_cross_slot(keys, args)
+ redis.call('set', 'foo', 'bar')
+ redis.call('set', 'bar', 'foo')
+ return 'OK'
+ end
+
+ redis.register_function{function_name='test_cross_slot', callback=test_cross_slot, flags={ 'allow-cross-slot-keys' }}}
+ r FCALL test_cross_slot 0
+ }
+
+ test {Cross slot commands are also blocked if they disagree with pre-declared keys} {
+ assert_error "ERR Script attempted to access keys that do not hash to the same slot*" {
+ r 0 eval {#!lua
+ redis.call('set', 'foo', 'bar')
+ return 'OK'
+ } 1 bar}
+ }
+
+ test "Function no-cluster flag" {
+ R 0 function load {#!lua name=test
+ redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-cluster'}}
+ }
+ catch {R 0 fcall f1 0} e
+ assert_match {*Can not run script on cluster, 'no-cluster' flag is set*} $e
+ }
+
+ test "Script no-cluster flag" {
+ catch {
+ R 0 eval {#!lua flags=no-cluster
+ return 1
+ } 0
+ } e
+
+ assert_match {*Can not run script on cluster, 'no-cluster' flag is set*} $e
+ }
+}
diff --git a/tests/unit/cluster/slot-ownership.tcl b/tests/unit/cluster/slot-ownership.tcl
new file mode 100644
index 0000000..0f3e3cc
--- /dev/null
+++ b/tests/unit/cluster/slot-ownership.tcl
@@ -0,0 +1,61 @@
+start_cluster 2 2 {tags {external:skip cluster}} {
+
+ test "Verify that slot ownership transfer through gossip propagates deletes to replicas" {
+ assert {[s -2 role] eq {slave}}
+ wait_for_condition 1000 50 {
+ [s -2 master_link_status] eq {up}
+ } else {
+ fail "Instance #2 master link status is not up"
+ }
+
+ assert {[s -3 role] eq {slave}}
+ wait_for_condition 1000 50 {
+ [s -3 master_link_status] eq {up}
+ } else {
+ fail "Instance #3 master link status is not up"
+ }
+
+ # Set a single key that will be used to test deletion
+ set key "FOO"
+ R 0 SET $key TEST
+ set key_slot [R 0 cluster keyslot $key]
+ set slot_keys_num [R 0 cluster countkeysinslot $key_slot]
+ assert {$slot_keys_num > 0}
+
+ # Wait for replica to have the key
+ R 2 readonly
+ wait_for_condition 1000 50 {
+ [R 2 exists $key] eq "1"
+ } else {
+ fail "Test key was not replicated"
+ }
+
+ assert_equal [R 2 cluster countkeysinslot $key_slot] $slot_keys_num
+
+        # Assert the other shards in the cluster don't have the key
+ assert_equal [R 1 cluster countkeysinslot $key_slot] "0"
+ assert_equal [R 3 cluster countkeysinslot $key_slot] "0"
+
+ set nodeid [R 1 cluster myid]
+
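+        # Bump the epoch so node 1's claim on the slot wins once it is gossiped.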
+ R 1 cluster bumpepoch
+ # Move $key_slot to node 1
+ assert_equal [R 1 cluster setslot $key_slot node $nodeid] "OK"
+
+ wait_for_cluster_propagation
+
+ # src master will delete keys in the slot
+ wait_for_condition 50 100 {
+ [R 0 cluster countkeysinslot $key_slot] eq 0
+ } else {
+ fail "master 'countkeysinslot $key_slot' did not eq 0"
+ }
+
+ # src replica will delete keys in the slot
+ wait_for_condition 50 100 {
+ [R 2 cluster countkeysinslot $key_slot] eq 0
+ } else {
+ fail "replica 'countkeysinslot $key_slot' did not eq 0"
+ }
+ }
+}
diff --git a/tests/unit/dump.tcl b/tests/unit/dump.tcl
new file mode 100644
index 0000000..dd75952
--- /dev/null
+++ b/tests/unit/dump.tcl
@@ -0,0 +1,410 @@
+start_server {tags {"dump"}} {
+ test {DUMP / RESTORE are able to serialize / unserialize a simple key} {
+ r set foo bar
+ set encoded [r dump foo]
+ r del foo
+ list [r exists foo] [r restore foo 0 $encoded] [r ttl foo] [r get foo]
+ } {0 OK -1 bar}
+
+ test {RESTORE can set an arbitrary expire to the materialized key} {
+ r set foo bar
+ set encoded [r dump foo]
+ r del foo
+ r restore foo 5000 $encoded
+ set ttl [r pttl foo]
+ assert_range $ttl 3000 5000
+ r get foo
+ } {bar}
+
+ test {RESTORE can set an expire that overflows a 32 bit integer} {
+ r set foo bar
+ set encoded [r dump foo]
+ r del foo
+ r restore foo 2569591501 $encoded
+ set ttl [r pttl foo]
+        assert_range $ttl [expr {2569591501-3000}] 2569591501
+ r get foo
+ } {bar}
+
+ test {RESTORE can set an absolute expire} {
+ r set foo bar
+ set encoded [r dump foo]
+ r del foo
+ set now [clock milliseconds]
+ r restore foo [expr $now+3000] $encoded absttl
+ set ttl [r pttl foo]
+ assert_range $ttl 2000 3100
+ r get foo
+ } {bar}
+
+ test {RESTORE with ABSTTL in the past} {
+ r set foo bar
+ set encoded [r dump foo]
+ set now [clock milliseconds]
+ r debug set-active-expire 0
+ r restore foo [expr $now-3000] $encoded absttl REPLACE
+ catch {r debug object foo} e
+ r debug set-active-expire 1
+ set e
+ } {ERR no such key} {needs:debug}
+
+ test {RESTORE can set LRU} {
+ r set foo bar
+ set encoded [r dump foo]
+ r del foo
+ r config set maxmemory-policy allkeys-lru
+ r restore foo 0 $encoded idletime 1000
+ set idle [r object idletime foo]
+ assert {$idle >= 1000 && $idle <= 1010}
+ assert_equal [r get foo] {bar}
+ r config set maxmemory-policy noeviction
+ } {OK} {needs:config-maxmemory}
+
+ test {RESTORE can set LFU} {
+ r set foo bar
+ set encoded [r dump foo]
+ r del foo
+ r config set maxmemory-policy allkeys-lfu
+ r restore foo 0 $encoded freq 100
+ set freq [r object freq foo]
+ assert {$freq == 100}
+ r get foo
+ assert_equal [r get foo] {bar}
+ r config set maxmemory-policy noeviction
+ } {OK} {needs:config-maxmemory}
+
+    test {RESTORE returns an error if the key already exists} {
+ r set foo bar
+ set e {}
+ catch {r restore foo 0 "..."} e
+ set e
+ } {*BUSYKEY*}
+
+ test {RESTORE can overwrite an existing key with REPLACE} {
+ r set foo bar1
+ set encoded1 [r dump foo]
+ r set foo bar2
+ set encoded2 [r dump foo]
+ r del foo
+ r restore foo 0 $encoded1
+ r restore foo 0 $encoded2 replace
+ r get foo
+ } {bar2}
+
+ test {RESTORE can detect a syntax error for unrecognized options} {
+ catch {r restore foo 0 "..." invalid-option} e
+ set e
+ } {*syntax*}
+
+ test {RESTORE should not store key that are already expired, with REPLACE will propagate it as DEL or UNLINK} {
+ r del key1{t} key2{t}
+ r set key1{t} value2
+ r lpush key2{t} 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65
+
+ r set key{t} value
+ set encoded [r dump key{t}]
+ set now [clock milliseconds]
+
+ set repl [attach_to_replication_stream]
+
+ # Keys that have expired will not be stored.
+ r config set lazyfree-lazy-server-del no
+ assert_equal {OK} [r restore key1{t} [expr $now-5000] $encoded replace absttl]
+ r config set lazyfree-lazy-server-del yes
+ assert_equal {OK} [r restore key2{t} [expr $now-5000] $encoded replace absttl]
+ assert_equal {0} [r exists key1{t} key2{t}]
+
+ # Verify the propagate of DEL and UNLINK.
+ assert_replication_stream $repl {
+ {select *}
+ {del key1{t}}
+ {unlink key2{t}}
+ }
+
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {DUMP of non existing key returns nil} {
+ r dump nonexisting_key
+ } {}
+
+ test {MIGRATE is caching connections} {
+ # Note, we run this as first test so that the connection cache
+ # is empty.
+ set first [srv 0 client]
+ r set key "Some Value"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert_match {*migrate_cached_sockets:0*} [r -1 info]
+ r -1 migrate $second_host $second_port key 9 1000
+ assert_match {*migrate_cached_sockets:1*} [r -1 info]
+ }
+ } {} {external:skip}
+
+ test {MIGRATE cached connections are released after some time} {
+ after 15000
+ assert_match {*migrate_cached_sockets:0*} [r info]
+ }
+
+ test {MIGRATE is able to migrate a key between two instances} {
+ set first [srv 0 client]
+ r set key "Some Value"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists key] == 1}
+ assert {[$second exists key] == 0}
+ set ret [r -1 migrate $second_host $second_port key 9 5000]
+ assert {$ret eq {OK}}
+ assert {[$first exists key] == 0}
+ assert {[$second exists key] == 1}
+ assert {[$second get key] eq {Some Value}}
+ assert {[$second ttl key] == -1}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE is able to copy a key between two instances} {
+ set first [srv 0 client]
+ r del list
+ r lpush list a b c d
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists list] == 1}
+ assert {[$second exists list] == 0}
+ set ret [r -1 migrate $second_host $second_port list 9 5000 copy]
+ assert {$ret eq {OK}}
+ assert {[$first exists list] == 1}
+ assert {[$second exists list] == 1}
+ assert {[$first lrange list 0 -1] eq [$second lrange list 0 -1]}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE will not overwrite existing keys, unless REPLACE is used} {
+ set first [srv 0 client]
+ r del list
+ r lpush list a b c d
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists list] == 1}
+ assert {[$second exists list] == 0}
+ $second set list somevalue
+ catch {r -1 migrate $second_host $second_port list 9 5000 copy} e
+ assert_match {ERR*} $e
+ set ret [r -1 migrate $second_host $second_port list 9 5000 copy replace]
+ assert {$ret eq {OK}}
+ assert {[$first exists list] == 1}
+ assert {[$second exists list] == 1}
+ assert {[$first lrange list 0 -1] eq [$second lrange list 0 -1]}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE propagates TTL correctly} {
+ set first [srv 0 client]
+ r set key "Some Value"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists key] == 1}
+ assert {[$second exists key] == 0}
+ $first expire key 10
+ set ret [r -1 migrate $second_host $second_port key 9 5000]
+ assert {$ret eq {OK}}
+ assert {[$first exists key] == 0}
+ assert {[$second exists key] == 1}
+ assert {[$second get key] eq {Some Value}}
+ assert {[$second ttl key] >= 7 && [$second ttl key] <= 10}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE can correctly transfer large values} {
+ set first [srv 0 client]
+ r del key
+ for {set j 0} {$j < 40000} {incr j} {
+ r rpush key 1 2 3 4 5 6 7 8 9 10
+ r rpush key "item 1" "item 2" "item 3" "item 4" "item 5" \
+ "item 6" "item 7" "item 8" "item 9" "item 10"
+ }
+ assert {[string length [r dump key]] > (1024*64)}
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists key] == 1}
+ assert {[$second exists key] == 0}
+ set ret [r -1 migrate $second_host $second_port key 9 10000]
+ assert {$ret eq {OK}}
+ assert {[$first exists key] == 0}
+ assert {[$second exists key] == 1}
+ assert {[$second ttl key] == -1}
+ assert {[$second llen key] == 40000*20}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE can correctly transfer hashes} {
+ set first [srv 0 client]
+ r del key
+ r hmset key field1 "item 1" field2 "item 2" field3 "item 3" \
+ field4 "item 4" field5 "item 5" field6 "item 6"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists key] == 1}
+ assert {[$second exists key] == 0}
+ set ret [r -1 migrate $second_host $second_port key 9 10000]
+ assert {$ret eq {OK}}
+ assert {[$first exists key] == 0}
+ assert {[$second exists key] == 1}
+ assert {[$second ttl key] == -1}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE timeout actually works} {
+ set first [srv 0 client]
+ r set key "Some Value"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists key] == 1}
+ assert {[$second exists key] == 0}
+
+ set rd [redis_deferring_client]
+ $rd debug sleep 1.0 ; # Make second server unable to reply.
+ set e {}
+ catch {r -1 migrate $second_host $second_port key 9 500} e
+ assert_match {IOERR*} $e
+ }
+ } {} {external:skip}
+
+ test {MIGRATE can migrate multiple keys at once} {
+ set first [srv 0 client]
+ r set key1 "v1"
+ r set key2 "v2"
+ r set key3 "v3"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ assert {[$first exists key1] == 1}
+ assert {[$second exists key1] == 0}
+ set ret [r -1 migrate $second_host $second_port "" 9 5000 keys key1 key2 key3]
+ assert {$ret eq {OK}}
+ assert {[$first exists key1] == 0}
+ assert {[$first exists key2] == 0}
+ assert {[$first exists key3] == 0}
+ assert {[$second get key1] eq {v1}}
+ assert {[$second get key2] eq {v2}}
+ assert {[$second get key3] eq {v3}}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE with multiple keys must have empty key arg} {
+ catch {r MIGRATE 127.0.0.1 6379 NotEmpty 9 5000 keys a b c} e
+ set e
+ } {*empty string*} {external:skip}
+
+ test {MIGRATE with multiple keys migrate just existing ones} {
+ set first [srv 0 client]
+ r set key1 "v1"
+ r set key2 "v2"
+ r set key3 "v3"
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ set ret [r -1 migrate $second_host $second_port "" 9 5000 keys nokey-1 nokey-2 nokey-2]
+ assert {$ret eq {NOKEY}}
+
+ assert {[$first exists key1] == 1}
+ assert {[$second exists key1] == 0}
+ set ret [r -1 migrate $second_host $second_port "" 9 5000 keys nokey-1 key1 nokey-2 key2 nokey-3 key3]
+ assert {$ret eq {OK}}
+ assert {[$first exists key1] == 0}
+ assert {[$first exists key2] == 0}
+ assert {[$first exists key3] == 0}
+ assert {[$second get key1] eq {v1}}
+ assert {[$second get key2] eq {v2}}
+ assert {[$second get key3] eq {v3}}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE with multiple keys: stress command rewriting} {
+ set first [srv 0 client]
+ r flushdb
+ r mset a 1 b 2 c 3 d 4 c 5 e 6 f 7 g 8 h 9 i 10 l 11 m 12 n 13 o 14 p 15 q 16
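+        # (note: "c" is set twice in the MSET above, so only 15 distinct keys exist)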
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ set ret [r -1 migrate $second_host $second_port "" 9 5000 keys a b c d e f g h i l m n o p q]
+
+ assert {[$first dbsize] == 0}
+ assert {[$second dbsize] == 15}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE with multiple keys: delete just ack keys} {
+ set first [srv 0 client]
+ r flushdb
+ r mset a 1 b 2 c 3 d 4 c 5 e 6 f 7 g 8 h 9 i 10 l 11 m 12 n 13 o 14 p 15 q 16
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+
+ $second mset c _ d _; # Two busy keys and no REPLACE used
+
+ catch {r -1 migrate $second_host $second_port "" 9 5000 keys a b c d e f g h i l m n o p q} e
+
+ assert {[$first dbsize] == 2}
+ assert {[$second dbsize] == 15}
+ assert {[$first exists c] == 1}
+ assert {[$first exists d] == 1}
+ }
+ } {} {external:skip}
+
+ test {MIGRATE AUTH: correct and wrong password cases} {
+ set first [srv 0 client]
+ r del list
+ r lpush list a b c d
+ start_server {tags {"repl"}} {
+ set second [srv 0 client]
+ set second_host [srv 0 host]
+ set second_port [srv 0 port]
+ $second config set requirepass foobar
+ $second auth foobar
+
+ assert {[$first exists list] == 1}
+ assert {[$second exists list] == 0}
+ set ret [r -1 migrate $second_host $second_port list 9 5000 AUTH foobar]
+ assert {$ret eq {OK}}
+ assert {[$second exists list] == 1}
+ assert {[$second lrange list 0 -1] eq {d c b a}}
+
+ r -1 lpush list a b c d
+ $second config set requirepass foobar2
+ catch {r -1 migrate $second_host $second_port list 9 5000 AUTH foobar} err
+ assert_match {*WRONGPASS*} $err
+ }
+ } {} {external:skip}
+}
diff --git a/tests/unit/expire.tcl b/tests/unit/expire.tcl
new file mode 100644
index 0000000..fc0ef61
--- /dev/null
+++ b/tests/unit/expire.tcl
@@ -0,0 +1,835 @@
+start_server {tags {"expire"}} {
+ test {EXPIRE - set timeouts multiple times} {
+ r set x foobar
+ set v1 [r expire x 5]
+ set v2 [r ttl x]
+ set v3 [r expire x 10]
+ set v4 [r ttl x]
+ r expire x 2
+ list $v1 $v2 $v3 $v4
+ } {1 [45] 1 10}
+
+ test {EXPIRE - It should still be possible to read 'x'} {
+ r get x
+ } {foobar}
+
+ tags {"slow"} {
+ test {EXPIRE - After 2.1 seconds the key should no longer be here} {
+ after 2100
+ list [r get x] [r exists x]
+ } {{} 0}
+ }
+
+ test {EXPIRE - write on expire should work} {
+ r del x
+ r lpush x foo
+ r expire x 1000
+ r lpush x bar
+ r lrange x 0 -1
+ } {bar foo}
+
+ test {EXPIREAT - Check for EXPIRE alike behavior} {
+ r del x
+ r set x foo
+ r expireat x [expr [clock seconds]+15]
+ r ttl x
+ } {1[345]}
+
+ test {SETEX - Set + Expire combo operation. Check for TTL} {
+ r setex x 12 test
+ r ttl x
+ } {1[012]}
+
+ test {SETEX - Check value} {
+ r get x
+ } {test}
+
+ test {SETEX - Overwrite old key} {
+ r setex y 1 foo
+ r get y
+ } {foo}
+
+ tags {"slow"} {
+ test {SETEX - Wait for the key to expire} {
+ after 1100
+ r get y
+ } {}
+ }
+
+ test {SETEX - Wrong time parameter} {
+ catch {r setex z -10 foo} e
+ set _ $e
+ } {*invalid expire*}
+
+ test {PERSIST can undo an EXPIRE} {
+ r set x foo
+ r expire x 50
+ list [r ttl x] [r persist x] [r ttl x] [r get x]
+ } {50 1 -1 foo}
+
+ test {PERSIST returns 0 against non-existing or non-volatile keys} {
+ r set x foo
+ list [r persist foo] [r persist nokeyatall]
+ } {0 0}
+
+ test {EXPIRE precision is now the millisecond} {
+ # This test can fail spuriously if the server is under pressure,
+ # so if it does not pass, give it a few more chances.
+ for {set j 0} {$j < 30} {incr j} {
+ r del x
+ r setex x 1 somevalue
+ after 800
+ set a [r get x]
+ if {$a ne {somevalue}} continue
+ after 300
+ set b [r get x]
+ if {$b eq {}} break
+ }
+ if {$::verbose} {
+ puts "millisecond expire test attempts: $j"
+ }
+ assert_equal $a {somevalue}
+ assert_equal $b {}
+ }
+
+ test "PSETEX can set sub-second expires" {
+ # This test can fail spuriously if the server is under pressure,
+ # so if it does not pass, give it a few more chances.
+ for {set j 0} {$j < 50} {incr j} {
+ r del x
+ r psetex x 100 somevalue
+ set a [r get x]
+ after 101
+ set b [r get x]
+
+ if {$a eq {somevalue} && $b eq {}} break
+ }
+ if {$::verbose} { puts "PSETEX sub-second expire test attempts: $j" }
+ list $a $b
+ } {somevalue {}}
+
+ test "PEXPIRE can set sub-second expires" {
+ # This test can fail spuriously if the server is under pressure,
+ # so if it does not pass, give it a few more chances.
+ for {set j 0} {$j < 50} {incr j} {
+ r set x somevalue
+ r pexpire x 100
+ set c [r get x]
+ after 101
+ set d [r get x]
+
+ if {$c eq {somevalue} && $d eq {}} break
+ }
+ if {$::verbose} { puts "PEXPIRE sub-second expire test attempts: $j" }
+ list $c $d
+ } {somevalue {}}
+
+ test "PEXPIREAT can set sub-second expires" {
+ # This test can fail spuriously if the server is under pressure,
+ # so if it does not pass, give it a few more chances.
+ for {set j 0} {$j < 50} {incr j} {
+ r set x somevalue
+ set now [r time]
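+ # TIME replies with {unix-seconds microseconds}; combine them into a
+ # millisecond timestamp 200 ms in the future.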
+ r pexpireat x [expr ([lindex $now 0]*1000)+([lindex $now 1]/1000)+200]
+ set e [r get x]
+ after 201
+ set f [r get x]
+
+ if {$e eq {somevalue} && $f eq {}} break
+ }
+ if {$::verbose} { puts "PEXPIREAT sub-second expire test attempts: $j" }
+ list $e $f
+ } {somevalue {}}
+
+ test {TTL returns time to live in seconds} {
+ r del x
+ r setex x 10 somevalue
+ set ttl [r ttl x]
+ assert {$ttl > 8 && $ttl <= 10}
+ }
+
+ test {PTTL returns time to live in milliseconds} {
+ r del x
+ r setex x 1 somevalue
+ set ttl [r pttl x]
+ assert {$ttl > 500 && $ttl <= 1000}
+ }
+
+ test {TTL / PTTL / EXPIRETIME / PEXPIRETIME return -1 if key has no expire} {
+ r del x
+ r set x hello
+ list [r ttl x] [r pttl x] [r expiretime x] [r pexpiretime x]
+ } {-1 -1 -1 -1}
+
+ test {TTL / PTTL / EXPIRETIME / PEXPIRETIME return -2 if key does not exist} {
+ r del x
+ list [r ttl x] [r pttl x] [r expiretime x] [r pexpiretime x]
+ } {-2 -2 -2 -2}
+
+ test {EXPIRETIME returns absolute expiration time in seconds} {
+ r del x
+ set abs_expire [expr [clock seconds] + 100]
+ r set x somevalue exat $abs_expire
+ assert_equal [r expiretime x] $abs_expire
+ }
+
+ test {PEXPIRETIME returns absolute expiration time in milliseconds} {
+ r del x
+ set abs_expire [expr [clock milliseconds] + 100000]
+ r set x somevalue pxat $abs_expire
+ assert_equal [r pexpiretime x] $abs_expire
+ }
+
+ test {Redis should actively expire keys incrementally} {
+ r flushdb
+ r psetex key1 500 a
+ r psetex key2 500 a
+ r psetex key3 500 a
+ assert_equal 3 [r dbsize]
+ # The active expire cycle samples random volatile keys ten times
+ # per second, so we can be fairly sure that all three keys are
+ # gone within two seconds.
+ wait_for_condition 20 100 {
+ [r dbsize] eq 0
+ } fail {
+ "Keys did not actively expire."
+ }
+ }
+
+ test {Redis should lazy expire keys} {
+ r flushdb
+ r debug set-active-expire 0
+ r psetex key1{t} 500 a
+ r psetex key2{t} 500 a
+ r psetex key3{t} 500 a
+ set size1 [r dbsize]
+ # Active expiry is disabled above, so even after the TTLs elapse
+ # the keys remain in the keyspace until they are accessed (lazy
+ # expiration): the MGET below is what finally removes them.
+ after 1000
+ set size2 [r dbsize]
+ r mget key1{t} key2{t} key3{t}
+ set size3 [r dbsize]
+ r debug set-active-expire 1
+ list $size1 $size2 $size3
+ } {3 3 0} {needs:debug}
+
+ test {EXPIRE should not resurrect keys (issue #1026)} {
+ r debug set-active-expire 0
+ r set foo bar
+ r pexpire foo 500
+ after 1000
+ r expire foo 10
+ r debug set-active-expire 1
+ r exists foo
+ } {0} {needs:debug}
+
+ test {5 keys in, 5 keys out} {
+ r flushdb
+ r set a c
+ r expire a 5
+ r set t c
+ r set e c
+ r set s c
+ r set foo b
+ assert_equal [lsort [r keys *]] {a e foo s t}
+ r del a ; # Do not leak volatile keys to other tests
+ }
+
+ test {EXPIRE with empty string as TTL should report an error} {
+ r set foo bar
+ catch {r expire foo ""} e
+ set e
+ } {*not an integer*}
+
+ test {SET with EX with big integer should report an error} {
+ catch {r set foo bar EX 10000000000000000} e
+ set e
+ } {ERR invalid expire time in 'set' command}
+
+ test {SET with EX with smallest integer should report an error} {
+ catch {r SET foo bar EX -9999999999999999} e
+ set e
+ } {ERR invalid expire time in 'set' command}
+
+ test {GETEX with big integer should report an error} {
+ r set foo bar
+ catch {r GETEX foo EX 10000000000000000} e
+ set e
+ } {ERR invalid expire time in 'getex' command}
+
+ test {GETEX with smallest integer should report an error} {
+ r set foo bar
+ catch {r GETEX foo EX -9999999999999999} e
+ set e
+ } {ERR invalid expire time in 'getex' command}
+
+ test {EXPIRE with big integer overflows when converted to milliseconds} {
+ r set foo bar
+
+ # Hit `when > LLONG_MAX - basetime`
+ assert_error "ERR invalid expire time in 'expire' command" {r EXPIRE foo 9223370399119966}
+
+ # Hit `when > LLONG_MAX / 1000`
+ assert_error "ERR invalid expire time in 'expire' command" {r EXPIRE foo 9223372036854776}
+ assert_error "ERR invalid expire time in 'expire' command" {r EXPIRE foo 10000000000000000}
+ assert_error "ERR invalid expire time in 'expire' command" {r EXPIRE foo 18446744073709561}
+
+ assert_equal {-1} [r ttl foo]
+ }
+
+ test {PEXPIRE with big integer overflows when basetime is added} {
+ r set foo bar
+ catch {r PEXPIRE foo 9223372036854770000} e
+ set e
+ } {ERR invalid expire time in 'pexpire' command}
+
+ test {EXPIRE with big negative integer} {
+ r set foo bar
+
+ # Hit `when < LLONG_MIN / 1000`
+ assert_error "ERR invalid expire time in 'expire' command" {r EXPIRE foo -9223372036854776}
+ assert_error "ERR invalid expire time in 'expire' command" {r EXPIRE foo -9999999999999999}
+
+ r ttl foo
+ } {-1}
+
+ test {PEXPIREAT with big integer works} {
+ r set foo bar
+ r PEXPIREAT foo 9223372036854770000
+ } {1}
+
+ test {PEXPIREAT with big negative integer works} {
+ r set foo bar
+ r PEXPIREAT foo -9223372036854770000
+ r ttl foo
+ } {-2}
+
+ # Start a new server with empty data and AOF file.
+ start_server {overrides {appendonly {yes} appendfsync always} tags {external:skip}} {
+ test {All time-to-live (TTL) values in commands are propagated as absolute millisecond timestamps in AOF} {
+ # This test makes sure that expire times are propagated as absolute
+ # times to the AOF file and not as relative time, so that when the AOF
+ # is reloaded the TTLs are not being shifted forward to the future.
+ # We want the time to logically pass when the server is restarted!
+
+ set aof [get_last_incr_aof_path r]
+
+ # Apply each TTL-related command to a unique key
+ # SET commands
+ r set foo1 bar ex 100
+ r set foo2 bar px 100000
+ r set foo3 bar exat [expr [clock seconds]+100]
+ r set foo4 bar PXAT [expr [clock milliseconds]+100000]
+ r setex foo5 100 bar
+ r psetex foo6 100000 bar
+ # EXPIRE-family commands
+ r set foo7 bar
+ r expire foo7 100
+ r set foo8 bar
+ r pexpire foo8 100000
+ r set foo9 bar
+ r expireat foo9 [expr [clock seconds]+100]
+ r set foo10 bar
+ r pexpireat foo10 [expr [clock seconds]*1000+100000]
+ r set foo11 bar
+ r expireat foo11 [expr [clock seconds]-100]
+ # GETEX commands
+ r set foo12 bar
+ r getex foo12 ex 100
+ r set foo13 bar
+ r getex foo13 px 100000
+ r set foo14 bar
+ r getex foo14 exat [expr [clock seconds]+100]
+ r set foo15 bar
+ r getex foo15 pxat [expr [clock milliseconds]+100000]
+ # RESTORE commands
+ r set foo16 bar
+ set encoded [r dump foo16]
+ r restore foo17 100000 $encoded
+ r restore foo18 [expr [clock milliseconds]+100000] $encoded absttl
+
+ # Assert that each TTL-related command is persisted with an absolute timestamp in the AOF
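+ # Commands rewritten by the server carry uppercase PXAT/ABSTTL, while
+ # user-supplied absolute arguments (e.g. foo18's absttl) are propagated
+ # verbatim; foo11, whose expire time is in the past, becomes a DEL.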
+ assert_aof_content $aof {
+ {select *}
+ {set foo1 bar PXAT *}
+ {set foo2 bar PXAT *}
+ {set foo3 bar PXAT *}
+ {set foo4 bar PXAT *}
+ {set foo5 bar PXAT *}
+ {set foo6 bar PXAT *}
+ {set foo7 bar}
+ {pexpireat foo7 *}
+ {set foo8 bar}
+ {pexpireat foo8 *}
+ {set foo9 bar}
+ {pexpireat foo9 *}
+ {set foo10 bar}
+ {pexpireat foo10 *}
+ {set foo11 bar}
+ {del foo11}
+ {set foo12 bar}
+ {pexpireat foo12 *}
+ {set foo13 bar}
+ {pexpireat foo13 *}
+ {set foo14 bar}
+ {pexpireat foo14 *}
+ {set foo15 bar}
+ {pexpireat foo15 *}
+ {set foo16 bar}
+ {restore foo17 * {*} ABSTTL}
+ {restore foo18 * {*} absttl}
+ }
+
+ # Remember the absolute TTLs of all the keys
+ set ttl1 [r pexpiretime foo1]
+ set ttl2 [r pexpiretime foo2]
+ set ttl3 [r pexpiretime foo3]
+ set ttl4 [r pexpiretime foo4]
+ set ttl5 [r pexpiretime foo5]
+ set ttl6 [r pexpiretime foo6]
+ set ttl7 [r pexpiretime foo7]
+ set ttl8 [r pexpiretime foo8]
+ set ttl9 [r pexpiretime foo9]
+ set ttl10 [r pexpiretime foo10]
+ assert_equal "-2" [r pexpiretime foo11] ; # foo11 is gone
+ set ttl12 [r pexpiretime foo12]
+ set ttl13 [r pexpiretime foo13]
+ set ttl14 [r pexpiretime foo14]
+ set ttl15 [r pexpiretime foo15]
+ assert_equal "-1" [r pexpiretime foo16] ; # foo16 has no TTL
+ set ttl17 [r pexpiretime foo17]
+ set ttl18 [r pexpiretime foo18]
+
+ # Let some time pass and reload data from AOF
+ after 2000
+ r debug loadaof
+
+ # Assert that relative TTLs are roughly the same
+ assert_range [r ttl foo1] 90 98
+ assert_range [r ttl foo2] 90 98
+ assert_range [r ttl foo3] 90 98
+ assert_range [r ttl foo4] 90 98
+ assert_range [r ttl foo5] 90 98
+ assert_range [r ttl foo6] 90 98
+ assert_range [r ttl foo7] 90 98
+ assert_range [r ttl foo8] 90 98
+ assert_range [r ttl foo9] 90 98
+ assert_range [r ttl foo10] 90 98
+ assert_equal [r ttl foo11] "-2" ; # foo11 is gone
+ assert_range [r ttl foo12] 90 98
+ assert_range [r ttl foo13] 90 98
+ assert_range [r ttl foo14] 90 98
+ assert_range [r ttl foo15] 90 98
+ assert_equal [r ttl foo16] "-1" ; # foo16 has no TTL
+ assert_range [r ttl foo17] 90 98
+ assert_range [r ttl foo18] 90 98
+
+ # Assert that all keys have restored the same absolute TTLs from AOF
+ assert_equal [r pexpiretime foo1] $ttl1
+ assert_equal [r pexpiretime foo2] $ttl2
+ assert_equal [r pexpiretime foo3] $ttl3
+ assert_equal [r pexpiretime foo4] $ttl4
+ assert_equal [r pexpiretime foo5] $ttl5
+ assert_equal [r pexpiretime foo6] $ttl6
+ assert_equal [r pexpiretime foo7] $ttl7
+ assert_equal [r pexpiretime foo8] $ttl8
+ assert_equal [r pexpiretime foo9] $ttl9
+ assert_equal [r pexpiretime foo10] $ttl10
+ assert_equal [r pexpiretime foo11] "-2" ; # foo11 is gone
+ assert_equal [r pexpiretime foo12] $ttl12
+ assert_equal [r pexpiretime foo13] $ttl13
+ assert_equal [r pexpiretime foo14] $ttl14
+ assert_equal [r pexpiretime foo15] $ttl15
+ assert_equal [r pexpiretime foo16] "-1" ; # foo16 has no TTL
+ assert_equal [r pexpiretime foo17] $ttl17
+ assert_equal [r pexpiretime foo18] $ttl18
+ } {} {needs:debug}
+ }
+
+ test {All TTLs in commands are propagated as absolute timestamps in the replication stream} {
+ # Make sure that both relative and absolute expire commands are propagated
+ # as absolute to replicas for two reasons:
+ # 1) We want to avoid replicas retaining data much longer than primary due
+ # to replication lag.
+ # 2) We want to unify the way TTLs are replicated in both RDB and replication
+ # stream, which is as absolute timestamps.
+ # See: https://github.com/redis/redis/issues/8433
+
+ r flushall ; # Clean up keyspace to avoid interference by keys from other tests
+ set repl [attach_to_replication_stream]
+ # SET commands
+ r set foo1 bar ex 200
+ r set foo1 bar px 100000
+ r set foo1 bar exat [expr [clock seconds]+100]
+ r set foo1 bar pxat [expr [clock milliseconds]+100000]
+ r setex foo1 100 bar
+ r psetex foo1 100000 bar
+ r set foo2 bar
+ # EXPIRE-family commands
+ r expire foo2 100
+ r pexpire foo2 100000
+ r set foo3 bar
+ r expireat foo3 [expr [clock seconds]+100]
+ r pexpireat foo3 [expr [clock seconds]*1000+100000]
+ r expireat foo3 [expr [clock seconds]-100]
+ # GETEX-family commands
+ r set foo4 bar
+ r getex foo4 ex 200
+ r getex foo4 px 200000
+ r getex foo4 exat [expr [clock seconds]+100]
+ r getex foo4 pxat [expr [clock milliseconds]+100000]
+ # RESTORE commands
+ r set foo5 bar
+ set encoded [r dump foo5]
+ r restore foo6 100000 $encoded
+ r restore foo7 [expr [clock milliseconds]+100000] $encoded absttl
+
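+ # Relative expires are rewritten to uppercase PXAT; the lowercase pxat
+ # pattern matches the one SET where an absolute pxat time was passed by
+ # the client and is propagated verbatim (likewise for restore's absttl).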
+ assert_replication_stream $repl {
+ {select *}
+ {set foo1 bar PXAT *}
+ {set foo1 bar PXAT *}
+ {set foo1 bar PXAT *}
+ {set foo1 bar pxat *}
+ {set foo1 bar PXAT *}
+ {set foo1 bar PXAT *}
+ {set foo2 bar}
+ {pexpireat foo2 *}
+ {pexpireat foo2 *}
+ {set foo3 bar}
+ {pexpireat foo3 *}
+ {pexpireat foo3 *}
+ {del foo3}
+ {set foo4 bar}
+ {pexpireat foo4 *}
+ {pexpireat foo4 *}
+ {pexpireat foo4 *}
+ {pexpireat foo4 *}
+ {set foo5 bar}
+ {restore foo6 * {*} ABSTTL}
+ {restore foo7 * {*} absttl}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ # Start another server to test replication of TTLs
+ start_server {tags {needs:repl external:skip}} {
+ # Set the outer layer server as primary
+ set primary [srv -1 client]
+ set primary_host [srv -1 host]
+ set primary_port [srv -1 port]
+ # Set this inner layer server as replica
+ set replica [srv 0 client]
+
+ test {Replica should have role slave after REPLICAOF} {
+ $replica replicaof $primary_host $primary_port
+ wait_for_condition 50 100 {
+ [s 0 role] eq {slave}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ test {For all replicated TTL-related commands, absolute expire times are identical on primary and replica} {
+ # Apply each TTL-related command to a unique key on primary
+ # SET commands
+ $primary set foo1 bar ex 100
+ $primary set foo2 bar px 100000
+ $primary set foo3 bar exat [expr [clock seconds]+100]
+ $primary set foo4 bar pxat [expr [clock milliseconds]+100000]
+ $primary setex foo5 100 bar
+ $primary psetex foo6 100000 bar
+ # EXPIRE-family commands
+ $primary set foo7 bar
+ $primary expire foo7 100
+ $primary set foo8 bar
+ $primary pexpire foo8 100000
+ $primary set foo9 bar
+ $primary expireat foo9 [expr [clock seconds]+100]
+ $primary set foo10 bar
+ $primary pexpireat foo10 [expr [clock milliseconds]+100000]
+ # GETEX commands
+ $primary set foo11 bar
+ $primary getex foo11 ex 100
+ $primary set foo12 bar
+ $primary getex foo12 px 100000
+ $primary set foo13 bar
+ $primary getex foo13 exat [expr [clock seconds]+100]
+ $primary set foo14 bar
+ $primary getex foo14 pxat [expr [clock milliseconds]+100000]
+ # RESTORE commands
+ $primary set foo15 bar
+ set encoded [$primary dump foo15]
+ $primary restore foo16 100000 $encoded
+ $primary restore foo17 [expr [clock milliseconds]+100000] $encoded absttl
+
+ # Wait for replica to get the keys and TTLs
+ assert {[$primary wait 1 0] == 1}
+
+ # Verify absolute TTLs are identical on primary and replica for all keys
+ # This is because TTLs are always replicated as absolute values
+ foreach key [$primary keys *] {
+ assert_equal [$primary pexpiretime $key] [$replica pexpiretime $key]
+ }
+ }
+
+ test {Expired keys created on writable replicas should be deleted by active expiry} {
+ $primary flushall
+ $replica config set replica-read-only no
+ foreach {yes_or_no} {yes no} {
+ $replica config set appendonly $yes_or_no
+ waitForBgrewriteaof $replica
+ set prev_expired [s expired_keys]
+ $replica set foo bar PX 1
+ wait_for_condition 100 10 {
+ [s expired_keys] eq $prev_expired + 1
+ } else {
+ fail "key not expired"
+ }
+ assert_equal {} [$replica get foo]
+ }
+ }
+ }
+
+ test {SET command will remove expire} {
+ r set foo bar EX 100
+ r set foo bar
+ r ttl foo
+ } {-1}
+
+ test {SET - use KEEPTTL option, TTL should not be removed} {
+ r set foo bar EX 100
+ r set foo bar KEEPTTL
+ set ttl [r ttl foo]
+ assert {$ttl <= 100 && $ttl > 90}
+ }
+
+ test {SET - use KEEPTTL option, TTL should not be removed after loadaof} {
+ r config set appendonly yes
+ r set foo bar EX 100
+ r set foo bar2 KEEPTTL
+ after 2000
+ r debug loadaof
+ set ttl [r ttl foo]
+ assert {$ttl <= 98 && $ttl > 90}
+ } {} {needs:debug}
+
+ test {GETEX use of PERSIST option should remove TTL} {
+ r set foo bar EX 100
+ r getex foo PERSIST
+ r ttl foo
+ } {-1}
+
+ test {GETEX use of PERSIST option should remove TTL after loadaof} {
+ r config set appendonly yes
+ r set foo bar EX 100
+ r getex foo PERSIST
+ r debug loadaof
+ r ttl foo
+ } {-1} {needs:debug}
+
+ test {GETEX propagates to the replica as PERSIST, DEL, or nothing} {
+ # The tests above set many keys with random expiration times; flush
+ # the DBs so active expiry does not pollute the replication stream.
+ r flushall
+ set repl [attach_to_replication_stream]
+ r set foo bar EX 100
+ r getex foo PERSIST
+ r getex foo
+ r getex foo exat [expr [clock seconds]-100]
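+ # PERSIST propagates as PERSIST, a plain GETEX propagates nothing, and
+ # GETEX with an expire time in the past deletes the key and propagates DEL.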
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar PXAT *}
+ {persist foo}
+ {del foo}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {EXPIRE with NX option on a key with ttl} {
+ r SET foo bar EX 100
+ assert_equal [r EXPIRE foo 200 NX] 0
+ assert_range [r TTL foo] 50 100
+ } {}
+
+ test {EXPIRE with NX option on a key without ttl} {
+ r SET foo bar
+ assert_equal [r EXPIRE foo 200 NX] 1
+ assert_range [r TTL foo] 100 200
+ } {}
+
+ test {EXPIRE with XX option on a key with ttl} {
+ r SET foo bar EX 100
+ assert_equal [r EXPIRE foo 200 XX] 1
+ assert_range [r TTL foo] 100 200
+ } {}
+
+ test {EXPIRE with XX option on a key without ttl} {
+ r SET foo bar
+ assert_equal [r EXPIRE foo 200 XX] 0
+ assert_equal [r TTL foo] -1
+ } {}
+
+ test {EXPIRE with GT option on a key with lower ttl} {
+ r SET foo bar EX 100
+ assert_equal [r EXPIRE foo 200 GT] 1
+ assert_range [r TTL foo] 100 200
+ } {}
+
+ test {EXPIRE with GT option on a key with higher ttl} {
+ r SET foo bar EX 200
+ assert_equal [r EXPIRE foo 100 GT] 0
+ assert_range [r TTL foo] 100 200
+ } {}
+
+ test {EXPIRE with GT option on a key without ttl} {
+ r SET foo bar
+ assert_equal [r EXPIRE foo 200 GT] 0
+ assert_equal [r TTL foo] -1
+ } {}
+
+ test {EXPIRE with LT option on a key with higher ttl} {
+ r SET foo bar EX 100
+ assert_equal [r EXPIRE foo 200 LT] 0
+ assert_range [r TTL foo] 50 100
+ } {}
+
+ test {EXPIRE with LT option on a key with lower ttl} {
+ r SET foo bar EX 200
+ assert_equal [r EXPIRE foo 100 LT] 1
+ assert_range [r TTL foo] 50 100
+ } {}
+
+ test {EXPIRE with LT option on a key without ttl} {
+ r SET foo bar
+ assert_equal [r EXPIRE foo 100 LT] 1
+ assert_range [r TTL foo] 50 100
+ } {}
+
+ test {EXPIRE with LT and XX option on a key with ttl} {
+ r SET foo bar EX 200
+ assert_equal [r EXPIRE foo 100 LT XX] 1
+ assert_range [r TTL foo] 50 100
+ } {}
+
+ test {EXPIRE with LT and XX option on a key without ttl} {
+ r SET foo bar
+ assert_equal [r EXPIRE foo 200 LT XX] 0
+ assert_equal [r TTL foo] -1
+ } {}
+
+ test {EXPIRE with conflicting options: LT GT} {
+ catch {r EXPIRE foo 200 LT GT} e
+ set e
+ } {ERR GT and LT options at the same time are not compatible}
+
+ test {EXPIRE with conflicting options: NX GT} {
+ catch {r EXPIRE foo 200 NX GT} e
+ set e
+ } {ERR NX and XX, GT or LT options at the same time are not compatible}
+
+ test {EXPIRE with conflicting options: NX LT} {
+ catch {r EXPIRE foo 200 NX LT} e
+ set e
+ } {ERR NX and XX, GT or LT options at the same time are not compatible}
+
+ test {EXPIRE with conflicting options: NX XX} {
+ catch {r EXPIRE foo 200 NX XX} e
+ set e
+ } {ERR NX and XX, GT or LT options at the same time are not compatible}
+
+ test {EXPIRE with unsupported options} {
+ catch {r EXPIRE foo 200 AB} e
+ set e
+ } {ERR Unsupported option AB}
+
+ test {EXPIRE with unsupported options} {
+ catch {r EXPIRE foo 200 XX AB} e
+ set e
+ } {ERR Unsupported option AB}
+
+ test {EXPIRE with negative expiry} {
+ r SET foo bar EX 100
+ assert_equal [r EXPIRE foo -10 LT] 1
+ assert_equal [r TTL foo] -2
+ } {}
+
+ test {EXPIRE with negative expiry on a non-volatile key} {
+ r SET foo bar
+ assert_equal [r EXPIRE foo -10 LT] 1
+ assert_equal [r TTL foo] -2
+ } {}
+
+ test {EXPIRE with non-existent key} {
+ assert_equal [r EXPIRE none 100 NX] 0
+ assert_equal [r EXPIRE none 100 XX] 0
+ assert_equal [r EXPIRE none 100 GT] 0
+ assert_equal [r EXPIRE none 100 LT] 0
+ } {}
+
+ test {Redis should not propagate the read command on lazy expire} {
+ r debug set-active-expire 0
+ r flushall ; # Clean up keyspace to avoid interference by keys from other tests
+ r set foo bar PX 1
+ set repl [attach_to_replication_stream]
+ wait_for_condition 50 100 {
+ [r get foo] eq {}
+ } else {
+ fail "Replication not started."
+ }
+
+ # dummy command to verify nothing else gets into the replication stream.
+ r set x 1
+
+ assert_replication_stream $repl {
+ {select *}
+ {del foo}
+ {set x 1}
+ }
+ close_replication_stream $repl
+ assert_equal [r debug set-active-expire 1] {OK}
+ } {} {needs:debug}
+
+ test {SCAN: Lazy-expire should not be wrapped in MULTI/EXEC} {
+ r debug set-active-expire 0
+ r flushall
+
+ r set foo1 bar PX 1
+ r set foo2 bar PX 1
+ after 2
+
+ set repl [attach_to_replication_stream]
+
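+ # SCAN touches the two expired keys; each lazy expiration must be
+ # propagated as a standalone DEL, not wrapped in MULTI/EXEC.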
+ r scan 0
+
+ assert_replication_stream $repl {
+ {select *}
+ {del foo*}
+ {del foo*}
+ }
+ close_replication_stream $repl
+ assert_equal [r debug set-active-expire 1] {OK}
+ } {} {needs:debug}
+
+ test {RANDOMKEY: Lazy-expire should not be wrapped in MULTI/EXEC} {
+ r debug set-active-expire 0
+ r flushall
+
+ r set foo1 bar PX 1
+ r set foo2 bar PX 1
+ after 2
+
+ set repl [attach_to_replication_stream]
+
+ r randomkey
+
+ assert_replication_stream $repl {
+ {select *}
+ {del foo*}
+ {del foo*}
+ }
+ close_replication_stream $repl
+ assert_equal [r debug set-active-expire 1] {OK}
+ } {} {needs:debug}
+}
diff --git a/tests/unit/functions.tcl b/tests/unit/functions.tcl
new file mode 100644
index 0000000..9e8ec08
--- /dev/null
+++ b/tests/unit/functions.tcl
@@ -0,0 +1,1233 @@
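+# Helpers that build a library source string of the form
+# "#!<engine> name=<library>" followed by a redis.register_function
+# call; the arguments are: engine, library name, function name, body.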
+proc get_function_code {engine name func_name body} {
+ return [format "#!%s name=%s\nredis.register_function('%s', function(KEYS, ARGV)\n %s \nend)" $engine $name $func_name $body]
+}
+
+proc get_no_writes_function_code {engine name func_name body} {
+ return [format "#!%s name=%s\nredis.register_function{function_name='%s', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" $engine $name $func_name $body]
+}
+
+start_server {tags {"scripting"}} {
+ test {FUNCTION - Basic usage} {
+ r function load [get_function_code LUA test test {return 'hello'}]
+ r fcall test 0
+ } {hello}
+
+ test {FUNCTION - Load with unknown argument} {
+ catch {
+ r function load foo bar [get_function_code LUA test test {return 'hello'}]
+ } e
+ set _ $e
+ } {*Unknown option given*}
+
+ test {FUNCTION - Creating an already existing library raises an error} {
+ catch {
+ r function load [get_function_code LUA test test {return 'hello1'}]
+ } e
+ set _ $e
+ } {*already exists*}
+
+ test {FUNCTION - Creating an already existing library raises an error (case insensitive)} {
+ catch {
+ r function load [get_function_code LUA test test {return 'hello1'}]
+ } e
+ set _ $e
+ } {*already exists*}
+
+ test {FUNCTION - Create a library with wrong name format} {
+ catch {
+ r function load [get_function_code LUA {bad\0foramat} test {return 'hello1'}]
+ } e
+ set _ $e
+ } {*Library names can only contain letters, numbers, or underscores(_)*}
+
+ test {FUNCTION - Create a library with a non-existing engine} {
+ catch {
+ r function load [get_function_code bad_engine test test {return 'hello1'}]
+ } e
+ set _ $e
+ } {*Engine 'bad_engine' not found*}
+
+ test {FUNCTION - Test a script that fails to compile} {
+ catch {
+ r function load replace [get_function_code LUA test test {bad script}]
+ } e
+ set _ $e
+ } {*Error compiling function*}
+
+ test {FUNCTION - test replace argument} {
+ r function load REPLACE [get_function_code LUA test test {return 'hello1'}]
+ r fcall test 0
+ } {hello1}
+
+ test {FUNCTION - test function case insensitive} {
+ r fcall TEST 0
+ } {hello1}
+
+ test {FUNCTION - test replace argument with failure keeps old libraries} {
+ catch {r function load REPLACE [get_function_code LUA test test {bad script}]}
+ r fcall test 0
+ } {hello1}
+
+ test {FUNCTION - test function delete} {
+ r function delete test
+ catch {
+ r fcall test 0
+ } e
+ set _ $e
+ } {*Function not found*}
+
+ test {FUNCTION - test fcall bad arguments} {
+ r function load [get_function_code LUA test test {return 'hello'}]
+ catch {
+ r fcall test bad_arg
+ } e
+ set _ $e
+ } {*Bad number of keys provided*}
+
+ test {FUNCTION - test fcall bad number of keys arguments} {
+ catch {
+ r fcall test 10 key1
+ } e
+ set _ $e
+ } {*Number of keys can't be greater than number of args*}
+
+ test {FUNCTION - test fcall negative number of keys} {
+ catch {
+ r fcall test -1 key1
+ } e
+ set _ $e
+ } {*Number of keys can't be negative*}
+
+ test {FUNCTION - test delete on a non-existing library} {
+ catch {
+ r function delete test1
+ } e
+ set _ $e
+ } {*Library not found*}
+
+ test {FUNCTION - test function kill when function is not running} {
+ catch {
+ r function kill
+ } e
+ set _ $e
+ } {*No scripts in execution*}
+
+ test {FUNCTION - test wrong subcommand} {
+ catch {
+ r function bad_subcommand
+ } e
+ set _ $e
+ } {*unknown subcommand*}
+
+ test {FUNCTION - test loading from rdb} {
+ r debug reload
+ r fcall test 0
+ } {hello} {needs:debug}
+
+ test {FUNCTION - test debug reload different options} {
+ catch {r debug reload noflush} e
+ assert_match "*Error trying to load the RDB*" $e
+ r debug reload noflush merge
+ r function list
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}} {needs:debug}
+
+ test {FUNCTION - test debug reload with nosave and noflush} {
+ r function delete test
+ r set x 1
+ r function load [get_function_code LUA test1 test1 {return 'hello'}]
+ r debug reload
+ r function load [get_function_code LUA test2 test2 {return 'hello'}]
+ r debug reload nosave noflush merge
+ assert_equal [r fcall test1 0] {hello}
+ assert_equal [r fcall test2 0] {hello}
+ } {} {needs:debug}
+
+ test {FUNCTION - test flushall and flushdb do not clean functions} {
+ r function flush
+ r function load REPLACE [get_function_code lua test test {return redis.call('set', 'x', '1')}]
+ r flushall
+ r flushdb
+ r function list
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+
+ test {FUNCTION - test function dump and restore} {
+ r function flush
+ r function load [get_function_code lua test test {return 'hello'}]
+ set e [r function dump]
+ r function delete test
+ assert_match {} [r function list]
+ r function restore $e
+ r function list
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+
+ test {FUNCTION - test function dump and restore with flush argument} {
+ set e [r function dump]
+ r function flush
+ assert_match {} [r function list]
+ r function restore $e FLUSH
+ r function list
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+
+ test {FUNCTION - test function dump and restore with append argument} {
+ set e [r function dump]
+ r function flush
+ assert_match {} [r function list]
+ r function load [get_function_code lua test test {return 'hello1'}]
+ catch {r function restore $e APPEND} err
+ assert_match {*already exists*} $err
+ r function flush
+ r function load [get_function_code lua test1 test1 {return 'hello1'}]
+ r function restore $e APPEND
+ assert_match {hello} [r fcall test 0]
+ assert_match {hello1} [r fcall test1 0]
+ }
+
+ test {FUNCTION - test function dump and restore with replace argument} {
+ r function flush
+ r function load [get_function_code LUA test test {return 'hello'}]
+ set e [r function dump]
+ r function flush
+ assert_match {} [r function list]
+ r function load [get_function_code lua test test {return 'hello1'}]
+ assert_match {hello1} [r fcall test 0]
+ r function restore $e REPLACE
+ assert_match {hello} [r fcall test 0]
+ }
+
+ test {FUNCTION - test function restore with bad payload does not drop existing functions} {
+ r function flush
+ r function load [get_function_code LUA test test {return 'hello'}]
+ catch {r function restore bad_payload} e
+ assert_match {*payload version or checksum are wrong*} $e
+ r function list
+ } {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+
+ test {FUNCTION - test function restore with wrong number of arguments} {
+ catch {r function restore arg1 args2 arg3} e
+ set _ $e
+ } {*unknown subcommand or wrong number of arguments for 'restore'. Try FUNCTION HELP.}
+
+ test {FUNCTION - test fcall_ro with write command} {
+ r function load REPLACE [get_no_writes_function_code lua test test {return redis.call('set', 'x', '1')}]
+ catch { r fcall_ro test 1 x } e
+ set _ $e
+ } {*Write commands are not allowed from read-only scripts*}
+
+ test {FUNCTION - test fcall_ro with read only commands} {
+ r function load REPLACE [get_no_writes_function_code lua test test {return redis.call('get', 'x')}]
+ r set x 1
+ r fcall_ro test 1 x
+ } {1}
+
+ test {FUNCTION - test keys and argv} {
+ r function load REPLACE [get_function_code lua test test {return redis.call('set', KEYS[1], ARGV[1])}]
+ r fcall test 1 x foo
+ r get x
+ } {foo}
+
+ test {FUNCTION - test command get keys on fcall} {
+ r COMMAND GETKEYS fcall test 1 x foo
+ } {x}
+
+ test {FUNCTION - test command get keys on fcall_ro} {
+ r COMMAND GETKEYS fcall_ro test 1 x foo
+ } {x}
+
+ test {FUNCTION - test function kill} {
+ set rd [redis_deferring_client]
+ r config set busy-reply-threshold 10
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ $rd fcall test 0
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ assert_match {running_script {name test command {fcall test 0} duration_ms *} engines {*}} [r FUNCTION STATS]
+ r function kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ assert_equal [r ping] "PONG"
+ }
+
+ test {FUNCTION - test script kill not working on function} {
+ set rd [redis_deferring_client]
+ r config set busy-reply-threshold 10
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ $rd fcall test 0
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ catch {r script kill} e
+ assert_match {BUSY*} $e
+ r function kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ assert_equal [r ping] "PONG"
+ }
+
+ test {FUNCTION - test function kill not working on eval} {
+ set rd [redis_deferring_client]
+ r config set busy-reply-threshold 10
+ $rd eval {local a = 1 while true do a = a + 1 end} 0
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ catch {r function kill} e
+ assert_match {BUSY*} $e
+ r script kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ assert_equal [r ping] "PONG"
+ }
+
+ test {FUNCTION - test function flush} {
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
+ r function flush
+ assert_match {} [r function list]
+
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
+ r function flush async
+ assert_match {} [r function list]
+
+ r function load REPLACE [get_function_code lua test test {local a = 1 while true do a = a + 1 end}]
+ assert_match {{library_name test engine LUA functions {{name test description {} flags {}}}}} [r function list]
+ r function flush sync
+ assert_match {} [r function list]
+ }
+
+ test {FUNCTION - test function wrong argument} {
+ catch {r function flush bad_arg} e
+ assert_match {*only supports SYNC|ASYNC*} $e
+
+ catch {r function flush sync extra_arg} e
+ assert_match {*unknown subcommand or wrong number of arguments for 'flush'. Try FUNCTION HELP.} $e
+ }
+}
+
+start_server {tags {"scripting repl external:skip"}} {
+ start_server {} {
+ test "Connect a replica to the master instance" {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ wait_for_condition 150 100 {
+ [s -1 role] eq {slave} &&
+ [string match {*master_link_status:up*} [r -1 info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+
+ test {FUNCTION - creation is replicated to replica} {
+ r function load [get_no_writes_function_code LUA test test {return 'hello'}]
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {{library_name test engine LUA functions {{name test description {} flags no-writes}}}}
+ } else {
+ fail "Failed waiting for function to replicate to replica"
+ }
+ }
+
+ test {FUNCTION - call on replica} {
+ r -1 fcall test 0
+ } {hello}
+
+ test {FUNCTION - restore is replicated to replica} {
+ set e [r function dump]
+
+ r function delete test
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {}
+ } else {
+ fail "Failed waiting for function to replicate to replica"
+ }
+
+ assert_equal [r function restore $e] {OK}
+
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {{library_name test engine LUA functions {{name test description {} flags no-writes}}}}
+ } else {
+ fail "Failed waiting for function to replicate to replica"
+ }
+ }
+
+ test {FUNCTION - delete is replicated to replica} {
+ r function delete test
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {}
+ } else {
+ fail "Failed waiting for function to replicate to replica"
+ }
+ }
+
+ test {FUNCTION - flush is replicated to replica} {
+ r function load [get_function_code LUA test test {return 'hello'}]
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+ } else {
+ fail "Failed waiting for function to replicate to replica"
+ }
+ r function flush
+ wait_for_condition 150 100 {
+ [r -1 function list] eq {}
+ } else {
+ fail "Failed waiting for function to replicate to replica"
+ }
+ }
+
+ test "Disconnecting the replica from master instance" {
+ r -1 slaveof no one
+ # creating a function after disconnect to make sure function
+ # is replicated on rdb phase
+ r function load [get_no_writes_function_code LUA test test {return 'hello'}]
+
+ # reconnect the replica
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ wait_for_condition 150 100 {
+ [s -1 role] eq {slave} &&
+ [string match {*master_link_status:up*} [r -1 info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+
+ test "FUNCTION - test replication to replica on rdb phase" {
+ r -1 fcall test 0
+ } {hello}
+
+ test "FUNCTION - test replication to replica on rdb phase info command" {
+ r -1 function list
+ } {{library_name test engine LUA functions {{name test description {} flags no-writes}}}}
+
+ test "FUNCTION - create on read only replica" {
+ catch {
+ r -1 function load [get_function_code LUA test test {return 'hello'}]
+ } e
+ set _ $e
+ } {*can't write against a read only replica*}
+
+ test "FUNCTION - delete on read only replica" {
+ catch {
+ r -1 function delete test
+ } e
+ set _ $e
+ } {*can't write against a read only replica*}
+
+ test "FUNCTION - function effect is replicated to replica" {
+ r function load REPLACE [get_function_code LUA test test {return redis.call('set', 'x', '1')}]
+ r fcall test 1 x
+ assert {[r get x] eq {1}}
+ wait_for_condition 150 100 {
+ [r -1 get x] eq {1}
+ } else {
+ fail "Failed waiting function effect to be replicated to replica"
+ }
+ }
+
+ test "FUNCTION - modify key space of read only replica" {
+ catch {
+ r -1 fcall test 1 x
+ } e
+ set _ $e
+ } {READONLY You can't write against a read only replica.}
+ }
+}
+
+test {FUNCTION can process create, delete and flush commands in AOF when doing "debug loadaof" in read-only slaves} {
+ start_server {} {
+ r config set appendonly yes
+ waitForBgrewriteaof r
+ r FUNCTION LOAD "#!lua name=test\nredis.register_function('test', function() return 'hello' end)"
+ r config set slave-read-only yes
+ r slaveof 127.0.0.1 0
+ r debug loadaof
+ r slaveof no one
+ assert_equal [r function list] {{library_name test engine LUA functions {{name test description {} flags {}}}}}
+
+ r FUNCTION DELETE test
+
+ r slaveof 127.0.0.1 0
+ r debug loadaof
+ r slaveof no one
+ assert_equal [r function list] {}
+
+ r FUNCTION LOAD "#!lua name=test\nredis.register_function('test', function() return 'hello' end)"
+ r FUNCTION FLUSH
+
+ r slaveof 127.0.0.1 0
+ r debug loadaof
+ r slaveof no one
+ assert_equal [r function list] {}
+ }
+} {} {needs:debug external:skip}
+
+start_server {tags {"scripting"}} {
+ test {LIBRARIES - test shared function can access default globals} {
+ r function load {#!lua name=lib1
+ local function ping()
+ return redis.call('ping')
+ end
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return ping()
+ end
+ )
+ }
+ r fcall f1 0
+ } {PONG}
+
+ test {LIBRARIES - usage and code sharing} {
+ r function load REPLACE {#!lua name=lib1
+ local function add1(a)
+ return a + 1
+ end
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return add1(1)
+ end
+ )
+ redis.register_function(
+ 'f2',
+ function(keys, args)
+ return add1(2)
+ end
+ )
+ }
+ assert_equal [r fcall f1 0] {2}
+ assert_equal [r fcall f2 0] {3}
+ r function list
+ } {{library_name lib1 engine LUA functions {*}}}
+
+ test {LIBRARIES - test registration failure reverts the entire load} {
+ catch {
+ r function load replace {#!lua name=lib1
+ local function add1(a)
+ return a + 2
+ end
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return add1(1)
+ end
+ )
+ redis.register_function(
+ 'f2',
+ 'not a function'
+ )
+ }
+ } e
+ assert_match {*second argument to redis.register_function must be a function*} $e
+ assert_equal [r fcall f1 0] {2}
+ assert_equal [r fcall f2 0] {3}
+ }
+
+ test {LIBRARIES - test registration function name collision} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return 1
+ end
+ )
+ }
+ } e
+ assert_match {*Function f1 already exists*} $e
+ assert_equal [r fcall f1 0] {2}
+ assert_equal [r fcall f2 0] {3}
+ }
+
+ test {LIBRARIES - test registration function name collision on same library} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return 1
+ end
+ )
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return 1
+ end
+ )
+ }
+ } e
+ set _ $e
+ } {*Function already exists in the library*}
+
+ test {LIBRARIES - test registration with no argument} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function()
+ }
+ } e
+ set _ $e
+ } {*wrong number of arguments to redis.register_function*}
+
+ test {LIBRARIES - test registration with only name} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function('f1')
+ }
+ } e
+ set _ $e
+ } {*calling redis.register_function with a single argument is only applicable to Lua table*}
+
+ test {LIBRARIES - test registration with too many arguments} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function('f1', function() return 1 end, {}, 'description', 'extra arg')
+ }
+ } e
+ set _ $e
+ } {*wrong number of arguments to redis.register_function*}
+
+ test {LIBRARIES - test registration with no string name} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function(nil, function() return 1 end)
+ }
+ } e
+ set _ $e
+ } {*first argument to redis.register_function must be a string*}
+
+ test {LIBRARIES - test registration with wrong name format} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function('test\0test', function() return 1 end)
+ }
+ } e
+ set _ $e
+ } {*Library names can only contain letters, numbers, or underscores(_) and must be at least one character long*}
+
+ test {LIBRARIES - test registration with empty name} {
+ catch {
+ r function load replace {#!lua name=lib2
+ redis.register_function('', function() return 1 end)
+ }
+ } e
+ set _ $e
+ } {*Library names can only contain letters, numbers, or underscores(_) and must be at least one character long*}
+
+ test {LIBRARIES - math.random from function load} {
+ catch {
+ r function load replace {#!lua name=lib2
+ return math.random()
+ }
+ } e
+ set _ $e
+ } {*attempted to access nonexistent global variable 'math'*}
+
+ test {LIBRARIES - redis.call from function load} {
+ catch {
+ r function load replace {#!lua name=lib2
+ return redis.call('ping')
+ }
+ } e
+ set _ $e
+ } {*attempted to access nonexistent global variable 'call'*}
+
+ test {LIBRARIES - redis.setresp from function load} {
+ catch {
+ r function load replace {#!lua name=lib2
+ return redis.setresp(3)
+ }
+ } e
+ set _ $e
+ } {*attempted to access nonexistent global variable 'setresp'*}
+
+ test {LIBRARIES - redis.set_repl from function load} {
+ catch {
+ r function load replace {#!lua name=lib2
+ return redis.set_repl(redis.REPL_NONE)
+ }
+ } e
+ set _ $e
+ } {*attempted to access nonexistent global variable 'set_repl'*}
+
+ test {LIBRARIES - redis.acl_check_cmd from function load} {
+ catch {
+ r function load replace {#!lua name=lib2
+ return redis.acl_check_cmd('set','xx',1)
+ }
+ } e
+ set _ $e
+ } {*attempted to access nonexistent global variable 'acl_check_cmd'*}
+
+ test {LIBRARIES - malicious access test} {
+ # The 'library' API is not exposed inside a
+ # function context, and the 'redis' API is not
+ # exposed in the library registration context.
+ # Still, a malicious user might find a way to
+ # hack around it (as demonstrated in this test),
+ # which is why there is another level of
+ # protection in the C code itself; here we
+ # verify that it works properly.
+ r function load replace {#!lua name=lib1
+ local lib = redis
+ lib.register_function('f1', function ()
+ lib.redis = redis
+ lib.math = math
+ return {ok='OK'}
+ end)
+
+ lib.register_function('f2', function ()
+ lib.register_function('f1', function ()
+ lib.redis = redis
+ lib.math = math
+ return {ok='OK'}
+ end)
+ end)
+ }
+ catch {[r fcall f1 0]} e
+ assert_match {*Attempt to modify a readonly table*} $e
+
+ catch {[r function load {#!lua name=lib2
+ redis.math.random()
+ }]} e
+ assert_match {*Script attempted to access nonexistent global variable 'math'*} $e
+
+ catch {[r function load {#!lua name=lib2
+ redis.redis.call('ping')
+ }]} e
+ assert_match {*Script attempted to access nonexistent global variable 'redis'*} $e
+
+ catch {[r fcall f2 0]} e
+ assert_match {*can only be called on FUNCTION LOAD command*} $e
+ }
+
+ test {LIBRARIES - delete removes all functions in the library} {
+ r function delete lib1
+ r function list
+ } {}
+
+ test {LIBRARIES - register function inside a function} {
+ r function load {#!lua name=lib
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ redis.register_function(
+ 'f2',
+ function(key, args)
+ return 2
+ end
+ )
+ return 1
+ end
+ )
+ }
+ catch {r fcall f1 0} e
+ set _ $e
+ } {*attempt to call field 'register_function' (a nil value)*}
+
+ test {LIBRARIES - register library with no functions} {
+ r function flush
+ catch {
+ r function load {#!lua name=lib
+ return 1
+ }
+ } e
+ set _ $e
+ } {*No functions registered*}
+
+ test {LIBRARIES - load timeout} {
+ catch {
+ r function load {#!lua name=lib
+ local a = 1
+ while 1 do a = a + 1 end
+ }
+ } e
+ set _ $e
+ } {*FUNCTION LOAD timeout*}
+
+ test {LIBRARIES - verify global protection on the load run} {
+ catch {
+ r function load {#!lua name=lib
+ a = 1
+ }
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test {LIBRARIES - named arguments} {
+ r function load {#!lua name=lib
+ redis.register_function{
+ function_name='f1',
+ callback=function()
+ return 'hello'
+ end,
+ description='some desc'
+ }
+ }
+ r function list
+ } {{library_name lib engine LUA functions {{name f1 description {some desc} flags {}}}}}
+
+ test {LIBRARIES - named arguments, bad function name} {
+ catch {
+ r function load replace {#!lua name=lib
+ redis.register_function{
+ function_name=function() return 1 end,
+ callback=function()
+ return 'hello'
+ end,
+ description='some desc'
+ }
+ }
+ } e
+ set _ $e
+ } {*function_name argument given to redis.register_function must be a string*}
+
+ test {LIBRARIES - named arguments, bad callback type} {
+ catch {
+ r function load replace {#!lua name=lib
+ redis.register_function{
+ function_name='f1',
+ callback='bad',
+ description='some desc'
+ }
+ }
+ } e
+ set _ $e
+ } {*callback argument given to redis.register_function must be a function*}
+
+ test {LIBRARIES - named arguments, bad description} {
+ catch {
+ r function load replace {#!lua name=lib
+ redis.register_function{
+ function_name='f1',
+ callback=function()
+ return 'hello'
+ end,
+ description=function() return 1 end
+ }
+ }
+ } e
+ set _ $e
+ } {*description argument given to redis.register_function must be a string*}
+
+ test {LIBRARIES - named arguments, unknown argument} {
+ catch {
+ r function load replace {#!lua name=lib
+ redis.register_function{
+ function_name='f1',
+ callback=function()
+ return 'hello'
+ end,
+ description='desc',
+ some_unknown='unknown'
+ }
+ }
+ } e
+ set _ $e
+ } {*unknown argument given to redis.register_function*}
+
+ test {LIBRARIES - named arguments, missing function name} {
+ catch {
+ r function load replace {#!lua name=lib
+ redis.register_function{
+ callback=function()
+ return 'hello'
+ end,
+ description='desc'
+ }
+ }
+ } e
+ set _ $e
+ } {*redis.register_function must get a function name argument*}
+
+ test {LIBRARIES - named arguments, missing callback} {
+ catch {
+ r function load replace {#!lua name=lib
+ redis.register_function{
+ function_name='f1',
+ description='desc'
+ }
+ }
+ } e
+ set _ $e
+ } {*redis.register_function must get a callback argument*}
+
+ test {FUNCTION - test function restore with function name collision} {
+ r function flush
+ r function load {#!lua name=lib1
+ local function add1(a)
+ return a + 1
+ end
+ redis.register_function(
+ 'f1',
+ function(keys, args)
+ return add1(1)
+ end
+ )
+ redis.register_function(
+ 'f2',
+ function(keys, args)
+ return add1(2)
+ end
+ )
+ redis.register_function(
+ 'f3',
+ function(keys, args)
+ return add1(3)
+ end
+ )
+ }
+ set e [r function dump]
+ r function flush
+
+ # load a library with different name but with the same function name
+ r function load {#!lua name=lib1
+ redis.register_function(
+ 'f6',
+ function(keys, args)
+ return 7
+ end
+ )
+ }
+ r function load {#!lua name=lib2
+ local function add1(a)
+ return a + 1
+ end
+ redis.register_function(
+ 'f4',
+ function(keys, args)
+ return add1(4)
+ end
+ )
+ redis.register_function(
+ 'f5',
+ function(keys, args)
+ return add1(5)
+ end
+ )
+ redis.register_function(
+ 'f3',
+ function(keys, args)
+ return add1(3)
+ end
+ )
+ }
+
+ catch {r function restore $e} error
+ assert_match {*Library lib1 already exists*} $error
+ assert_equal [r fcall f3 0] {4}
+ assert_equal [r fcall f4 0] {5}
+ assert_equal [r fcall f5 0] {6}
+ assert_equal [r fcall f6 0] {7}
+
+ catch {r function restore $e replace} error
+ assert_match {*Function f3 already exists*} $error
+ assert_equal [r fcall f3 0] {4}
+ assert_equal [r fcall f4 0] {5}
+ assert_equal [r fcall f5 0] {6}
+ assert_equal [r fcall f6 0] {7}
+ }
+
+ test {FUNCTION - test function list with code} {
+ r function flush
+ r function load {#!lua name=library1
+ redis.register_function('f6', function(keys, args) return 7 end)
+ }
+ r function list withcode
+ } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}} library_code {*redis.register_function('f6', function(keys, args) return 7 end)*}}}
+
+ test {FUNCTION - test function list with pattern} {
+ r function load {#!lua name=lib1
+ redis.register_function('f7', function(keys, args) return 7 end)
+ }
+ r function list libraryname library*
+ } {{library_name library1 engine LUA functions {{name f6 description {} flags {}}}}}
+
+ test {FUNCTION - test function list wrong argument} {
+ catch {r function list bad_argument} e
+ set _ $e
+ } {*Unknown argument bad_argument*}
+
+ test {FUNCTION - test function list with bad argument to library name} {
+ catch {r function list libraryname} e
+ set _ $e
+ } {*library name argument was not given*}
+
+ test {FUNCTION - test function list withcode multiple times} {
+ catch {r function list withcode withcode} e
+ set _ $e
+ } {*Unknown argument withcode*}
+
+ test {FUNCTION - test function list libraryname multiple times} {
+ catch {r function list withcode libraryname foo libraryname foo} e
+ set _ $e
+ } {*Unknown argument libraryname*}
+
+ test {FUNCTION - verify OOM on function load and function restore} {
+ r function flush
+ r function load replace {#!lua name=test
+ redis.register_function('f1', function() return 1 end)
+ }
+ set payload [r function dump]
+ r config set maxmemory 1
+
+ r function flush
+ catch {r function load replace {#!lua name=test
+ redis.register_function('f1', function() return 1 end)
+ }} e
+ assert_match {*command not allowed when used memory*} $e
+
+ r function flush
+ catch {r function restore $payload} e
+ assert_match {*command not allowed when used memory*} $e
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test {FUNCTION - verify allow-oom allows running any command} {
+ r FUNCTION load replace {#!lua name=f1
+ redis.register_function{
+ function_name='f1',
+ callback=function() return redis.call('set', 'x', '1') end,
+ flags={'allow-oom'}
+ }
+ }
+
+ r config set maxmemory 1
+
+ assert_match {OK} [r fcall f1 1 x]
+ assert_match {1} [r get x]
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+}
+
+start_server {tags {"scripting"}} {
+ test {FUNCTION - wrong flags type named arguments} {
+ catch {r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return 1 end,
+ flags = 'bad flags type'
+ }
+ }} e
+ set _ $e
+ } {*flags argument to redis.register_function must be a table representing function flags*}
+
+ test {FUNCTION - wrong flag type} {
+ catch {r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return 1 end,
+ flags = {function() return 1 end}
+ }
+ }} e
+ set _ $e
+ } {*unknown flag given*}
+
+ test {FUNCTION - unknown flag} {
+ catch {r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return 1 end,
+ flags = {'unknown'}
+ }
+ }} e
+ set _ $e
+ } {*unknown flag given*}
+
+ test {FUNCTION - write script on fcall_ro} {
+ r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return redis.call('set', 'x', 1) end
+ }
+ }
+ catch {r fcall_ro f1 1 x} e
+ set _ $e
+ } {*Can not execute a script with write flag using \*_ro command*}
+
+ test {FUNCTION - write script with no-writes flag} {
+ r function load replace {#!lua name=test
+ redis.register_function{
+ function_name = 'f1',
+ callback = function() return redis.call('set', 'x', 1) end,
+ flags = {'no-writes'}
+ }
+ }
+ catch {r fcall f1 1 x} e
+ set _ $e
+ } {*Write commands are not allowed from read-only scripts*}
+
+ test {FUNCTION - deny oom} {
+ r FUNCTION load replace {#!lua name=test
+ redis.register_function('f1', function() return redis.call('set', 'x', '1') end)
+ }
+
+ r config set maxmemory 1
+
+ catch {[r fcall f1 1 x]} e
+ assert_match {OOM *when used memory > 'maxmemory'*} $e
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test {FUNCTION - deny oom on no-writes function} {
+ r FUNCTION load replace {#!lua name=test
+ redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}}
+ }
+
+ r config set maxmemory 1
+
+ assert_equal [r fcall f1 1 k] hello
+ assert_equal [r fcall_ro f1 1 k] hello
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test {FUNCTION - allow stale} {
+ r FUNCTION load replace {#!lua name=test
+ redis.register_function{function_name='f1', callback=function() return 'hello' end, flags={'no-writes'}}
+ redis.register_function{function_name='f2', callback=function() return 'hello' end, flags={'allow-stale', 'no-writes'}}
+ redis.register_function{function_name='f3', callback=function() return redis.call('get', 'x') end, flags={'allow-stale', 'no-writes'}}
+ redis.register_function{function_name='f4', callback=function() return redis.call('info', 'server') end, flags={'allow-stale', 'no-writes'}}
+ }
+
+ r config set replica-serve-stale-data no
+ r replicaof 127.0.0.1 1
+
+ catch {[r fcall f1 0]} e
+ assert_match {MASTERDOWN *} $e
+
+ assert_equal {hello} [r fcall f2 0]
+
+ catch {[r fcall f3 1 x]} e
+ assert_match {ERR *Can not execute the command on a stale replica*} $e
+
+ assert_match {*redis_version*} [r fcall f4 0]
+
+ r replicaof no one
+ r config set replica-serve-stale-data yes
+ set _ {}
+ } {} {external:skip}
+
+ test {FUNCTION - redis version api} {
+ r FUNCTION load replace {#!lua name=test
+ local version = redis.REDIS_VERSION_NUM
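+ -- REDIS_VERSION_NUM packs the version as 0x00MMmmpp, so shift and mask to recover major.minor.patch.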
+
+ redis.register_function{function_name='get_version_v1', callback=function()
+ return string.format('%s.%s.%s',
+ bit.band(bit.rshift(version, 16), 0x000000ff),
+ bit.band(bit.rshift(version, 8), 0x000000ff),
+ bit.band(version, 0x000000ff))
+ end}
+ redis.register_function{function_name='get_version_v2', callback=function() return redis.REDIS_VERSION end}
+ }
+
+ assert_equal [r fcall get_version_v1 0] [r fcall get_version_v2 0]
+ }
+
+ test {FUNCTION - function stats} {
+ r FUNCTION FLUSH
+
+ r FUNCTION load {#!lua name=test1
+ redis.register_function('f1', function() return 1 end)
+ redis.register_function('f2', function() return 1 end)
+ }
+
+ r FUNCTION load {#!lua name=test2
+ redis.register_function('f3', function() return 1 end)
+ }
+
+ r function stats
+ } {running_script {} engines {LUA {libraries_count 2 functions_count 3}}}
+
+ test {FUNCTION - function stats reloaded correctly from rdb} {
+ r debug reload
+ r function stats
+ } {running_script {} engines {LUA {libraries_count 2 functions_count 3}}} {needs:debug}
+
+ test {FUNCTION - function stats delete library} {
+ r function delete test1
+ r function stats
+ } {running_script {} engines {LUA {libraries_count 1 functions_count 1}}}
+
+ test {FUNCTION - test function stats on loading failure} {
+ r FUNCTION FLUSH
+
+ r FUNCTION load {#!lua name=test1
+ redis.register_function('f1', function() return 1 end)
+ redis.register_function('f2', function() return 1 end)
+ }
+
+ catch {r FUNCTION load {#!lua name=test1
+ redis.register_function('f3', function() return 1 end)
+ }} e
+ assert_match "*Library 'test1' already exists*" $e
+
+ r function stats
+ } {running_script {} engines {LUA {libraries_count 1 functions_count 2}}}
+
+ test {FUNCTION - function stats cleaned after flush} {
+ r function flush
+ r function stats
+ } {running_script {} engines {LUA {libraries_count 0 functions_count 0}}}
+
+ test {FUNCTION - function test empty engine} {
+ catch {r function load replace {#! name=test
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Engine '' not found}
+
+ test {FUNCTION - function test unknown metadata value} {
+ catch {r function load replace {#!lua name=test foo=bar
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Invalid metadata value given: foo=bar}
+
+ test {FUNCTION - function test no name} {
+ catch {r function load replace {#!lua
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Library name was not given}
+
+ test {FUNCTION - function test multiple names} {
+ catch {r function load replace {#!lua name=foo name=bar
+ redis.register_function('foo', function() return 1 end)
+ }} e
+ set _ $e
+ } {ERR Invalid metadata value, name argument was given multiple times}
+
+ test {FUNCTION - function test name with quotes} {
+ r function load replace {#!lua name="foo"
+ redis.register_function('foo', function() return 1 end)
+ }
+ } {foo}
+
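+ # The Lua sandbox exposes globals through _G's metatable as a readonly
+ # table; the function below tries to bypass the protection by writing
+ # through the metatable's __index and should be rejected (a summary;
+ # see the scripting sandbox code for the authoritative details).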
+ test {FUNCTION - trick global protection 1} {
+ r FUNCTION FLUSH
+
+ r FUNCTION load {#!lua name=test1
+ redis.register_function('f1', function()
+ mt = getmetatable(_G)
+ original_globals = mt.__index
+ original_globals['redis'] = function() return 1 end
+ end)
+ }
+
+ catch {[r fcall f1 0]} e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test {FUNCTION - test getmetatable on script load} {
+ r FUNCTION FLUSH
+
+ catch {
+ r FUNCTION load {#!lua name=test1
+ mt = getmetatable(_G)
+ }
+ } e
+
+ set _ $e
+ } {*Script attempted to access nonexistent global variable 'getmetatable'*}
+
+}
diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl
new file mode 100644
index 0000000..6175329
--- /dev/null
+++ b/tests/unit/geo.tcl
@@ -0,0 +1,768 @@
+# Helper functions to simulate search-in-radius on the Tcl side in order
+# to verify the Redis implementation with a fuzzy test.
+proc geo_degrad deg {expr {$deg*(atan(1)*8/360)}}
+proc geo_raddeg rad {expr {$rad/(atan(1)*8/360)}}
+
+proc geo_distance {lon1d lat1d lon2d lat2d} {
+ set lon1r [geo_degrad $lon1d]
+ set lat1r [geo_degrad $lat1d]
+ set lon2r [geo_degrad $lon2d]
+ set lat2r [geo_degrad $lat2d]
+ set v [expr {sin(($lon2r - $lon1r) / 2)}]
+ set u [expr {sin(($lat2r - $lat1r) / 2)}]
+ expr {2.0 * 6372797.560856 * \
+ asin(sqrt($u * $u + cos($lat1r) * cos($lat2r) * $v * $v))}
+}
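+
+# A quick sanity check of the helpers above (illustrative only, not
+# executed by the suite): atan(1)*8 equals 2*pi, so geo_degrad simply
+# converts degrees to radians. For example:
+#   set d [geo_distance 13.361389 38.115556 15.087269 37.502669]
+#   # Palermo -> Catania: ~166274 m, matching the GEODIST test below.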
+
+proc geo_random_point {lonvar latvar} {
+ upvar 1 $lonvar lon
+ upvar 1 $latvar lat
+ # Note that the actual latitude limit should be -85 to +85. We
+ # restrict the test to -70 to +70 since the algorithm is more precise
+ # in this range, while outside of it some element may occasionally be
+ # missing from the results.
+ set lon [expr {-180 + rand()*360}]
+ set lat [expr {-70 + rand()*140}]
+}
+
+# Return the elements that are not common to both lists.
+# This code is from http://wiki.tcl.tk/15489
+proc compare_lists {List1 List2} {
+ set DiffList {}
+ foreach Item $List1 {
+ if {[lsearch -exact $List2 $Item] == -1} {
+ lappend DiffList $Item
+ }
+ }
+ foreach Item $List2 {
+ if {[lsearch -exact $List1 $Item] == -1} {
+ if {[lsearch -exact $DiffList $Item] == -1} {
+ lappend DiffList $Item
+ }
+ }
+ }
+ return $DiffList
+}
+
+# Return true if the point is inside the circle.
+# search_lon and search_lat define the center of the circle,
+# and lon, lat define the point being searched.
+proc pointInCircle {radius_km lon lat search_lon search_lat} {
+ set radius_m [expr {$radius_km*1000}]
+ set distance [geo_distance $lon $lat $search_lon $search_lat]
+ if {$distance < $radius_m} {
+ return true
+ }
+ return false
+}
+
+# Return true if the point is inside the rectangle.
+# search_lon and search_lat define the center of the rectangle,
+# and lon, lat define the point being searched.
+# error: scales the rectangle's width and height to absorb floating
+# point error (e.g. 1.001 widens the box by 0.1% on each axis).
+proc pointInRectangle {width_km height_km lon lat search_lon search_lat error} {
+ set width_m [expr {$width_km*1000*$error/2}]
+ set height_m [expr {$height_km*1000*$error/2}]
+ set lon_distance [geo_distance $lon $lat $search_lon $lat]
+ set lat_distance [geo_distance $lon $lat $lon $search_lat]
+
+ if {$lon_distance > $width_m || $lat_distance > $height_m} {
+ return false
+ }
+ return true
+}
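+
+# Illustrative usage (not an extra test): with error=1.001 the box below
+# is widened by 0.1% on each axis, absorbing floating point rounding
+# when cross-checking Redis results:
+#   pointInRectangle 400 400 $lon $lat $search_lon $search_lat 1.001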
+
+proc verify_geo_edge_response_bylonlat {expected_response expected_store_response} {
+ catch {r georadius src{t} 1 1 1 km} response
+ assert_match $expected_response $response
+
+ catch {r georadius src{t} 1 1 1 km store dest{t}} response
+ assert_match $expected_store_response $response
+
+ catch {r geosearch src{t} fromlonlat 0 0 byradius 1 km} response
+ assert_match $expected_response $response
+
+ catch {r geosearchstore dest{t} src{t} fromlonlat 0 0 byradius 1 km} response
+ assert_match $expected_store_response $response
+}
+
+proc verify_geo_edge_response_bymember {expected_response expected_store_response} {
+ catch {r georadiusbymember src{t} member 1 km} response
+ assert_match $expected_response $response
+
+ catch {r georadiusbymember src{t} member 1 km store dest{t}} response
+ assert_match $expected_store_response $response
+
+ catch {r geosearch src{t} frommember member bybox 1 1 km} response
+ assert_match $expected_response $response
+
+ catch {r geosearchstore dest{t} src{t} frommember member bybox 1 1 m} response
+ assert_match $expected_store_response $response
+}
+
+proc verify_geo_edge_response_generic {expected_response} {
+ catch {r geodist src{t} member 1 km} response
+ assert_match $expected_response $response
+
+ catch {r geohash src{t} member} response
+ assert_match $expected_response $response
+
+ catch {r geopos src{t} member} response
+ assert_match $expected_response $response
+}
+
+
+# The following list represents sets of random seed, search position
+# and radius that caused bugs in the past. It is used by the randomized
+# test later as a starting point. Once the regression vectors are
+# exhausted, the code reverts to using random data.
+#
+# The format is: seed km lon lat
+set regression_vectors {
+ {1482225976969 7083 81.634948934258375 30.561509253718668}
+ {1482340074151 5416 -70.863281847379767 -46.347003465679947}
+ {1499014685896 6064 -89.818768962202014 -40.463868561416803}
+ {1412 156 149.29737817929004 15.95807862745508}
+ {441574 143 59.235461856813856 66.269555127373678}
+ {160645 187 -101.88575239939883 49.061997951502917}
+ {750269 154 -90.187939661642517 66.615930412251487}
+ {342880 145 163.03472387745728 64.012747720821181}
+ {729955 143 137.86663517256579 63.986745399416776}
+ {939895 151 59.149620271823181 65.204186651485145}
+ {1412 156 149.29737817929004 15.95807862745508}
+ {564862 149 84.062063109158544 -65.685403922426232}
+ {1546032440391 16751 -1.8175081637769495 20.665668878082954}
+}
+set rv_idx 0
+
+start_server {tags {"geo"}} {
+ test {GEO with wrong type src key} {
+ r set src{t} wrong_type
+
+ verify_geo_edge_response_bylonlat "WRONGTYPE*" "WRONGTYPE*"
+ verify_geo_edge_response_bymember "WRONGTYPE*" "WRONGTYPE*"
+ verify_geo_edge_response_generic "WRONGTYPE*"
+ }
+
+ test {GEO with non existing src key} {
+ r del src{t}
+
+ verify_geo_edge_response_bylonlat {} 0
+ verify_geo_edge_response_bymember {} 0
+ }
+
+ test {GEO BYLONLAT with empty search} {
+ r del src{t}
+ r geoadd src{t} 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+
+ verify_geo_edge_response_bylonlat {} 0
+ }
+
+ test {GEO BYMEMBER with non existing member} {
+ r del src{t}
+ r geoadd src{t} 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+
+ verify_geo_edge_response_bymember "ERR*" "ERR*"
+ }
+
+ test {GEOADD create} {
+ r geoadd nyc -73.9454966 40.747533 "lic market"
+ } {1}
+
+ test {GEOADD update} {
+ r geoadd nyc -73.9454966 40.747533 "lic market"
+ } {0}
+
+ test {GEOADD update with CH option} {
+ assert_equal 1 [r geoadd nyc CH 40.747533 -73.9454966 "lic market"]
+ lassign [lindex [r geopos nyc "lic market"] 0] x1 y1
+ assert {abs($x1 - 40.747533) < 0.001}
+ assert {abs($y1 + 73.9454966) < 0.001}
+ } {}
+
+ test {GEOADD update with NX option} {
+ assert_equal 0 [r geoadd nyc NX -73.9454966 40.747533 "lic market"]
+ lassign [lindex [r geopos nyc "lic market"] 0] x1 y1
+ assert {abs($x1 - 40.747533) < 0.001}
+ assert {abs($y1 + 73.9454966) < 0.001}
+ } {}
+
+ test {GEOADD update with XX option} {
+ assert_equal 0 [r geoadd nyc XX -83.9454966 40.747533 "lic market"]
+ lassign [lindex [r geopos nyc "lic market"] 0] x1 y1
+ assert {abs($x1 + 83.9454966) < 0.001}
+ assert {abs($y1 - 40.747533) < 0.001}
+ } {}
+
+ test {GEOADD update with CH NX option} {
+ r geoadd nyc CH NX -73.9454966 40.747533 "lic market"
+ } {0}
+
+ test {GEOADD update with CH XX option} {
+ r geoadd nyc CH XX -73.9454966 40.747533 "lic market"
+ } {1}
+
+ test {GEOADD update with XX NX option will return syntax error} {
+ catch {
+ r geoadd nyc xx nx -73.9454966 40.747533 "lic market"
+ } err
+ set err
+ } {ERR *syntax*}
+
+ test {GEOADD update with invalid option} {
+ catch {
+ r geoadd nyc ch xx foo -73.9454966 40.747533 "lic market"
+ } err
+ set err
+ } {ERR *syntax*}
+
+ test {GEOADD invalid coordinates} {
+ catch {
+ r geoadd nyc -73.9454966 40.747533 "lic market" \
+ foo bar "luck market"
+ } err
+ set err
+ } {*valid*}
+
+ test {GEOADD multi add} {
+ r geoadd nyc -73.9733487 40.7648057 "central park n/q/r" -73.9903085 40.7362513 "union square" -74.0131604 40.7126674 "wtc one" -73.7858139 40.6428986 "jfk" -73.9375699 40.7498929 "q4" -73.9564142 40.7480973 4545
+ } {6}
+
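+ # GEOADD stores members in a plain sorted set whose scores are 52-bit
+ # interleaved geohash integers, which is why the raw ZRANGE WITHSCORES
+ # output below can be asserted verbatim.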
+ test {Check geoset values} {
+ r zrange nyc 0 -1 withscores
+ } {{wtc one} 1791873972053020 {union square} 1791875485187452 {central park n/q/r} 1791875761332224 4545 1791875796750882 {lic market} 1791875804419201 q4 1791875830079666 jfk 1791895905559723}
+
+ test {GEORADIUS simple (sorted)} {
+ r georadius nyc -73.9798091 40.7598464 3 km asc
+ } {{central park n/q/r} 4545 {union square}}
+
+ test {GEORADIUS_RO simple (sorted)} {
+ r georadius_ro nyc -73.9798091 40.7598464 3 km asc
+ } {{central park n/q/r} 4545 {union square}}
+
+ test {GEOSEARCH simple (sorted)} {
+ r geosearch nyc fromlonlat -73.9798091 40.7598464 bybox 6 6 km asc
+ } {{central park n/q/r} 4545 {union square} {lic market}}
+
+ test {GEOSEARCH FROMLONLAT and FROMMEMBER cannot exist at the same time} {
+ catch {r geosearch nyc fromlonlat -73.9798091 40.7598464 frommember xxx bybox 6 6 km asc} e
+ set e
+ } {ERR *syntax*}
+
+ test {GEOSEARCH FROMLONLAT and FROMMEMBER one must exist} {
+ catch {r geosearch nyc bybox 3 3 km asc desc withhash withdist withcoord} e
+ set e
+ } {ERR *exactly one of FROMMEMBER or FROMLONLAT*}
+
+ test {GEOSEARCH BYRADIUS and BYBOX cannot exist at the same time} {
+ catch {r geosearch nyc fromlonlat -73.9798091 40.7598464 byradius 3 km bybox 3 3 km asc} e
+ set e
+ } {ERR *syntax*}
+
+ test {GEOSEARCH BYRADIUS and BYBOX one must exist} {
+ catch {r geosearch nyc fromlonlat -73.9798091 40.7598464 asc desc withhash withdist withcoord} e
+ set e
+ } {ERR *exactly one of BYRADIUS and BYBOX*}
+
+ test {GEOSEARCH with STOREDIST option} {
+ catch {r geosearch nyc fromlonlat -73.9798091 40.7598464 bybox 6 6 km asc storedist} e
+ set e
+ } {ERR *syntax*}
+
+ test {GEORADIUS withdist (sorted)} {
+ r georadius nyc -73.9798091 40.7598464 3 km withdist asc
+ } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697}}
+
+ test {GEOSEARCH withdist (sorted)} {
+ r geosearch nyc fromlonlat -73.9798091 40.7598464 bybox 6 6 km withdist asc
+ } {{{central park n/q/r} 0.7750} {4545 2.3651} {{union square} 2.7697} {{lic market} 3.1991}}
+
+ test {GEORADIUS with COUNT} {
+ r georadius nyc -73.9798091 40.7598464 10 km COUNT 3
+ } {{central park n/q/r} 4545 {union square}}
+
+ test {GEORADIUS with multiple WITH* tokens} {
+ assert_match {{{central park n/q/r} 1791875761332224 {-73.97334* 40.76480*}} {4545 1791875796750882 {-73.95641* 40.74809*}}} [r georadius nyc -73.9798091 40.7598464 10 km WITHCOORD WITHHASH COUNT 2]
+ assert_match {{{central park n/q/r} 1791875761332224 {-73.97334* 40.76480*}} {4545 1791875796750882 {-73.95641* 40.74809*}}} [r georadius nyc -73.9798091 40.7598464 10 km WITHHASH WITHCOORD COUNT 2]
+ assert_match {{{central park n/q/r} 0.7750 1791875761332224 {-73.97334* 40.76480*}} {4545 2.3651 1791875796750882 {-73.95641* 40.74809*}}} [r georadius nyc -73.9798091 40.7598464 10 km WITHDIST WITHHASH WITHCOORD COUNT 2]
+ }
+
+ test {GEORADIUS with ANY not sorted by default} {
+ r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 ANY
+ } {{wtc one} {union square} {central park n/q/r}}
+
+ test {GEORADIUS with ANY sorted by ASC} {
+ r georadius nyc -73.9798091 40.7598464 10 km COUNT 3 ANY ASC
+ } {{central park n/q/r} {union square} {wtc one}}
+
+ test {GEORADIUS with ANY but no COUNT} {
+ catch {r georadius nyc -73.9798091 40.7598464 10 km ANY ASC} e
+ set e
+ } {ERR *ANY*requires*COUNT*}
+
+ test {GEORADIUS with COUNT but missing integer argument} {
+ catch {r georadius nyc -73.9798091 40.7598464 10 km COUNT} e
+ set e
+ } {ERR *syntax*}
+
+ test {GEORADIUS with COUNT DESC} {
+ r georadius nyc -73.9798091 40.7598464 10 km COUNT 2 DESC
+ } {{wtc one} q4}
+
+ test {GEORADIUS HUGE, issue #2767} {
+ r geoadd users -47.271613776683807 -54.534504198047678 user_000000
+ llength [r GEORADIUS users 0 0 50000 km WITHCOORD]
+ } {1}
+
+ test {GEORADIUSBYMEMBER simple (sorted)} {
+ r georadiusbymember nyc "wtc one" 7 km
+ } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}}
+
+ test {GEORADIUSBYMEMBER_RO simple (sorted)} {
+ r georadiusbymember_ro nyc "wtc one" 7 km
+ } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}}
+
+ test {GEORADIUSBYMEMBER search areas include matching points in oblique directions} {
+ r del k1
+
+ r geoadd k1 -0.15307903289794921875 85 n1 0.3515625 85.00019260486917005437 n2
+ set ret1 [r GEORADIUSBYMEMBER k1 n1 4891.94 m]
+ assert_equal $ret1 {n1 n2}
+
+ r zrem k1 n1 n2
+ r geoadd k1 -4.95211958885192871094 85 n3 11.25 85.0511 n4
+ set ret2 [r GEORADIUSBYMEMBER k1 n3 156544 m]
+ assert_equal $ret2 {n3 n4}
+
+ r zrem k1 n3 n4
+ r geoadd k1 -45 65.50900022111811438208 n5 90 85.0511 n6
+ set ret3 [r GEORADIUSBYMEMBER k1 n5 5009431 m]
+ assert_equal $ret3 {n5 n6}
+ }
+
+ test {GEORADIUSBYMEMBER crossing pole search} {
+ r del k1
+ r geoadd k1 45 65 n1 -135 85.05 n2
+ set ret [r GEORADIUSBYMEMBER k1 n1 5009431 m]
+ assert_equal $ret {n1 n2}
+ }
+
+ test {GEOSEARCH FROMMEMBER simple (sorted)} {
+ r geosearch nyc frommember "wtc one" bybox 14 14 km
+ } {{wtc one} {union square} {central park n/q/r} 4545 {lic market} q4}
+
+ test {GEOSEARCH vs GEORADIUS} {
+ r del Sicily
+ r geoadd Sicily 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+ r geoadd Sicily 12.758489 38.788135 "edge1" 17.241510 38.788135 "eage2"
+ set ret1 [r georadius Sicily 15 37 200 km asc]
+ assert_equal $ret1 {Catania Palermo}
+ set ret2 [r geosearch Sicily fromlonlat 15 37 bybox 400 400 km asc]
+ assert_equal $ret2 {Catania Palermo eage2 edge1}
+ }
+
+ test {GEOSEARCH non square, long and narrow} {
+ r del Sicily
+ r geoadd Sicily 12.75 36.995 "test1"
+ r geoadd Sicily 12.75 36.50 "test2"
+ r geoadd Sicily 13.00 36.50 "test3"
+ # box height=2km width=400km
+ set ret1 [r geosearch Sicily fromlonlat 15 37 bybox 400 2 km]
+ assert_equal $ret1 {test1}
+
+ # Add a western Hemisphere point
+ r geoadd Sicily -1 37.00 "test3"
+ set ret2 [r geosearch Sicily fromlonlat 15 37 bybox 3000 2 km asc]
+ assert_equal $ret2 {test1 test3}
+ }
+
+ test {GEOSEARCH corner point test} {
+ r del Sicily
+ r geoadd Sicily 12.758489 38.788135 edge1 17.241510 38.788135 edge2 17.250000 35.202000 edge3 12.750000 35.202000 edge4 12.748489955781654 37 edge5 15 38.798135872540925 edge6 17.251510044218346 37 edge7 15 35.201864127459075 edge8 12.692799634687903 38.798135872540925 corner1 12.692799634687903 38.798135872540925 corner2 17.200560937451133 35.201864127459075 corner3 12.799439062548865 35.201864127459075 corner4
+ set ret [lsort [r geosearch Sicily fromlonlat 15 37 bybox 400 400 km asc]]
+ assert_equal $ret {edge1 edge2 edge5 edge7}
+ }
+
+ test {GEORADIUSBYMEMBER withdist (sorted)} {
+ r georadiusbymember nyc "wtc one" 7 km withdist
+ } {{{wtc one} 0.0000} {{union square} 3.2544} {{central park n/q/r} 6.7000} {4545 6.1975} {{lic market} 6.8969}}
+
+ test {GEOHASH is able to return geohash strings} {
+ # Example from Wikipedia.
+ r del points
+ r geoadd points -5.6 42.6 test
+ lindex [r geohash points test] 0
+ } {ezs42e44yx0}
+
+ test {GEOHASH with only key as argument} {
+ r del points
+ r geoadd points 10 20 a 30 40 b
+ set result [r geohash points]
+ assert {$result eq {}}
+ }
+
+ test {GEOPOS simple} {
+ r del points
+ r geoadd points 10 20 a 30 40 b
+ lassign [lindex [r geopos points a b] 0] x1 y1
+ lassign [lindex [r geopos points a b] 1] x2 y2
+ assert {abs($x1 - 10) < 0.001}
+ assert {abs($y1 - 20) < 0.001}
+ assert {abs($x2 - 30) < 0.001}
+ assert {abs($y2 - 40) < 0.001}
+ }
+
+ test {GEOPOS missing element} {
+ r del points
+ r geoadd points 10 20 a 30 40 b
+ lindex [r geopos points a x b] 1
+ } {}
+
+ test {GEOPOS with only key as argument} {
+ r del points
+ r geoadd points 10 20 a 30 40 b
+ set result [r geopos points]
+ assert {$result eq {}}
+ }
+
+ test {GEODIST simple & unit} {
+ r del points
+ r geoadd points 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ set m [r geodist points Palermo Catania]
+ assert {$m > 166274 && $m < 166275}
+ set km [r geodist points Palermo Catania km]
+ assert {$km > 166.2 && $km < 166.3}
+ set dist [r geodist points Palermo Palermo]
+ assert {$dist eq 0.0000}
+ }
+
+ test {GEODIST missing elements} {
+ r del points
+ r geoadd points 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ set m [r geodist points Palermo Agrigento]
+ assert {$m eq {}}
+ set m [r geodist points Ragusa Agrigento]
+ assert {$m eq {}}
+ set m [r geodist empty_key Palermo Catania]
+ assert {$m eq {}}
+ }
+
+ test {GEORADIUS STORE option: syntax error} {
+ r del points{t}
+ r geoadd points{t} 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ catch {r georadius points{t} 13.361389 38.115556 50 km store} e
+ set e
+ } {*ERR*syntax*}
+
+ test {GEOSEARCHSTORE STORE option: syntax error} {
+ catch {r geosearchstore abc{t} points{t} fromlonlat 13.361389 38.115556 byradius 50 km store abc{t}} e
+ set e
+ } {*ERR*syntax*}
+
+ test {GEORANGE STORE option: incompatible options} {
+ r del points{t}
+ r geoadd points{t} 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ catch {r georadius points{t} 13.361389 38.115556 50 km store points2{t} withdist} e
+ assert_match {*ERR*} $e
+ catch {r georadius points{t} 13.361389 38.115556 50 km store points2{t} withhash} e
+ assert_match {*ERR*} $e
+ catch {r georadius points{t} 13.361389 38.115556 50 km store points2{t} withcoords} e
+ assert_match {*ERR*} $e
+ }
+
+ test {GEORANGE STORE option: plain usage} {
+ r del points{t}
+ r geoadd points{t} 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ r georadius points{t} 13.361389 38.115556 500 km store points2{t}
+ assert_equal [r zrange points{t} 0 -1] [r zrange points2{t} 0 -1]
+ }
+
+ test {GEORADIUSBYMEMBER STORE/STOREDIST option: plain usage} {
+ r del points{t}
+ r geoadd points{t} 13.361389 38.115556 "Palermo" 15.087269 37.502669 "Catania"
+
+ r georadiusbymember points{t} Palermo 500 km store points2{t}
+ assert_equal {Palermo Catania} [r zrange points2{t} 0 -1]
+
+ r georadiusbymember points{t} Catania 500 km storedist points2{t}
+ assert_equal {Catania Palermo} [r zrange points2{t} 0 -1]
+
+ set res [r zrange points2{t} 0 -1 withscores]
+ assert {[lindex $res 1] < 1}
+ assert {[lindex $res 3] > 166}
+ }
+
+ test {GEOSEARCHSTORE STORE option: plain usage} {
+ r geosearchstore points2{t} points{t} fromlonlat 13.361389 38.115556 byradius 500 km
+ assert_equal [r zrange points{t} 0 -1] [r zrange points2{t} 0 -1]
+ }
+
+ test {GEORANGE STOREDIST option: plain usage} {
+ r del points{t}
+ r geoadd points{t} 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ r georadius points{t} 13.361389 38.115556 500 km storedist points2{t}
+ set res [r zrange points2{t} 0 -1 withscores]
+ assert {[lindex $res 1] < 1}
+ assert {[lindex $res 3] > 166}
+ assert {[lindex $res 3] < 167}
+ }
+
+ test {GEOSEARCHSTORE STOREDIST option: plain usage} {
+ r geosearchstore points2{t} points{t} fromlonlat 13.361389 38.115556 byradius 500 km storedist
+ set res [r zrange points2{t} 0 -1 withscores]
+ assert {[lindex $res 1] < 1}
+ assert {[lindex $res 3] > 166}
+ assert {[lindex $res 3] < 167}
+ }
+
+ test {GEORANGE STOREDIST option: COUNT ASC and DESC} {
+ r del points{t}
+ r geoadd points{t} 13.361389 38.115556 "Palermo" \
+ 15.087269 37.502669 "Catania"
+ r georadius points{t} 13.361389 38.115556 500 km storedist points2{t} asc count 1
+ assert {[r zcard points2{t}] == 1}
+ set res [r zrange points2{t} 0 -1 withscores]
+ assert {[lindex $res 0] eq "Palermo"}
+
+ r georadius points{t} 13.361389 38.115556 500 km storedist points2{t} desc count 1
+ assert {[r zcard points2{t}] == 1}
+ set res [r zrange points2{t} 0 -1 withscores]
+ assert {[lindex $res 0] eq "Catania"}
+ }
+
+ test {GEOSEARCH the box spans -180° or 180°} {
+ r del points
+ r geoadd points 179.5 36 point1
+ r geoadd points -179.5 36 point2
+ assert_equal {point1 point2} [r geosearch points fromlonlat 179 37 bybox 400 400 km asc]
+ assert_equal {point2 point1} [r geosearch points fromlonlat -179 37 bybox 400 400 km asc]
+ }
+
+ test {GEOSEARCH with small distance} {
+ r del points
+ r geoadd points -122.407107 37.794300 1
+ r geoadd points -122.227336 37.794300 2
+ assert_equal {{1 0.0001} {2 9.8182}} [r GEORADIUS points -122.407107 37.794300 30 mi ASC WITHDIST]
+ }
+
+ foreach {type} {byradius bybox} {
+ test "GEOSEARCH fuzzy test - $type" {
+ if {$::accurate} { set attempt 300 } else { set attempt 30 }
+ while {[incr attempt -1]} {
+ set rv [lindex $regression_vectors $rv_idx]
+ incr rv_idx
+
+ set radius_km 0; set width_km 0; set height_km 0
+ unset -nocomplain debuginfo
+ set srand_seed [clock milliseconds]
+ if {$rv ne {}} {set srand_seed [lindex $rv 0]}
+ lappend debuginfo "srand_seed is $srand_seed"
+ expr {srand($srand_seed)} ; # If you need a reproducible run
+ r del mypoints
+
+ if {[randomInt 10] == 0} {
+ # From time to time use very big radii
+ if {$type == "byradius"} {
+ set radius_km [expr {[randomInt 5000]+10}]
+ } elseif {$type == "bybox"} {
+ set width_km [expr {[randomInt 5000]+10}]
+ set height_km [expr {[randomInt 5000]+10}]
+ }
+ } else {
+ # Normally use radii of ~10-210 km to stress
+ # test the code the most in edge cases.
+ if {$type == "byradius"} {
+ set radius_km [expr {[randomInt 200]+10}]
+ } elseif {$type == "bybox"} {
+ set width_km [expr {[randomInt 200]+10}]
+ set height_km [expr {[randomInt 200]+10}]
+ }
+ }
+ if {$rv ne {}} {
+ set radius_km [lindex $rv 1]
+ set width_km [lindex $rv 1]
+ set height_km [lindex $rv 1]
+ }
+ geo_random_point search_lon search_lat
+ if {$rv ne {}} {
+ set search_lon [lindex $rv 2]
+ set search_lat [lindex $rv 3]
+ }
+ lappend debuginfo "Search area: $search_lon,$search_lat $radius_km $width_km $height_km km"
+ set tcl_result {}
+ set argv {}
+ for {set j 0} {$j < 20000} {incr j} {
+ geo_random_point lon lat
+ lappend argv $lon $lat "place:$j"
+ if {$type == "byradius"} {
+ if {[pointInCircle $radius_km $lon $lat $search_lon $search_lat]} {
+ lappend tcl_result "place:$j"
+ }
+ } elseif {$type == "bybox"} {
+ if {[pointInRectangle $width_km $height_km $lon $lat $search_lon $search_lat 1]} {
+ lappend tcl_result "place:$j"
+ }
+ }
+ lappend debuginfo "place:$j $lon $lat"
+ }
+ r geoadd mypoints {*}$argv
+ if {$type == "byradius"} {
+ set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat byradius $radius_km km]]
+ } elseif {$type == "bybox"} {
+ set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_km $height_km km]]
+ }
+ set res2 [lsort $tcl_result]
+ set test_result OK
+
+ if {$res != $res2} {
+ set rounding_errors 0
+ set diff [compare_lists $res $res2]
+ foreach place $diff {
+ lassign [lindex [r geopos mypoints $place] 0] lon lat
+ set mydist [geo_distance $lon $lat $search_lon $search_lat]
+ set mydist [expr $mydist/1000]
+ if {$type == "byradius"} {
+ if {($mydist / $radius_km) > 0.999} {
+ incr rounding_errors
+ continue
+ }
+ if {$mydist < [expr {$radius_km*1000}]} {
+ # This is a false positive for Redis since, given the
+ # same points, the higher precision calculation performed
+ # by Tcl shows the point within range.
+ incr rounding_errors
+ continue
+ }
+ } elseif {$type == "bybox"} {
+ # we add 0.1% error for floating point calculation error
+ if {[pointInRectangle $width_km $height_km $lon $lat $search_lon $search_lat 1.001]} {
+ incr rounding_errors
+ continue
+ }
+ }
+ }
+
+ # Make sure this is a real error and not a rounding issue.
+ if {[llength $diff] == $rounding_errors} {
+ set res $res2; # Error silenced
+ }
+ }
+
+ if {$res != $res2} {
+ set diff [compare_lists $res $res2]
+ puts "*** Possible problem in GEO radius query ***"
+ puts "Redis: $res"
+ puts "Tcl : $res2"
+ puts "Diff : $diff"
+ puts [join $debuginfo "\n"]
+ foreach place $diff {
+ if {[lsearch -exact $res2 $place] != -1} {
+ set where "(only in Tcl)"
+ } else {
+ set where "(only in Redis)"
+ }
+ lassign [lindex [r geopos mypoints $place] 0] lon lat
+ set mydist [geo_distance $lon $lat $search_lon $search_lat]
+ set mydist [expr $mydist/1000]
+ puts "$place -> [r geopos mypoints $place] $mydist $where"
+ }
+ set test_result FAIL
+ }
+ unset -nocomplain debuginfo
+ if {$test_result ne {OK}} break
+ }
+ set test_result
+ } {OK}
+ }
+
+ test {GEOSEARCH box edges fuzzy test} {
+ if {$::accurate} { set attempt 300 } else { set attempt 30 }
+ while {[incr attempt -1]} {
+ unset -nocomplain debuginfo
+ set srand_seed [clock milliseconds]
+ lappend debuginfo "srand_seed is $srand_seed"
+ expr {srand($srand_seed)} ; # If you need a reproducible run
+ r del mypoints
+
+ geo_random_point search_lon search_lat
+ set width_m [expr {[randomInt 10000]+10}]
+ set height_m [expr {[randomInt 10000]+10}]
+ set lat_delta [geo_raddeg [expr {$height_m/2/6372797.560856}]]
+ set long_delta_top [geo_raddeg [expr {$width_m/2/6372797.560856/cos([geo_degrad [expr {$search_lat+$lat_delta}]])}]]
+ set long_delta_middle [geo_raddeg [expr {$width_m/2/6372797.560856/cos([geo_degrad $search_lat])}]]
+ set long_delta_bottom [geo_raddeg [expr {$width_m/2/6372797.560856/cos([geo_degrad [expr {$search_lat-$lat_delta}]])}]]
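+
+ # These deltas convert meters to degrees: the latitude delta is arc
+ # length over the Earth radius (radians, converted via geo_raddeg),
+ # while the longitude deltas are additionally divided by cos(latitude)
+ # since meridians converge toward the poles.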
+
+ # A total of 8 points is generated, one at each vertex and one at the center of each side
+ set points(north) [list $search_lon [expr {$search_lat+$lat_delta}]]
+ set points(south) [list $search_lon [expr {$search_lat-$lat_delta}]]
+ set points(east) [list [expr {$search_lon+$long_delta_middle}] $search_lat]
+ set points(west) [list [expr {$search_lon-$long_delta_middle}] $search_lat]
+ set points(north_east) [list [expr {$search_lon+$long_delta_top}] [expr {$search_lat+$lat_delta}]]
+ set points(north_west) [list [expr {$search_lon-$long_delta_top}] [expr {$search_lat+$lat_delta}]]
+ set points(south_east) [list [expr {$search_lon+$long_delta_bottom}] [expr {$search_lat-$lat_delta}]]
+ set points(south_west) [list [expr {$search_lon-$long_delta_bottom}] [expr {$search_lat-$lat_delta}]]
+
+ lappend debuginfo "Search area: geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_m $height_m m"
+ set tcl_result {}
+ foreach name [array names points] {
+ set x [lindex $points($name) 0]
+ set y [lindex $points($name) 1]
+ # If the longitude crosses -180° or 180°, we need to wrap it around.
+ # Latitude doesn't have this problem because its range is -70 to +70, see geo_random_point.
+ if {$x > 180} {
+ set x [expr {$x-360}]
+ } elseif {$x < -180} {
+ set x [expr {$x+360}]
+ }
+ r geoadd mypoints $x $y place:$name
+ lappend tcl_result "place:$name"
+ lappend debuginfo "geoadd mypoints $x $y place:$name"
+ }
+
+ set res2 [lsort $tcl_result]
+
+ # Make the box larger by two meters in each direction to put the coordinates slightly inside the box.
+ set height_new [expr {$height_m+4}]
+ set width_new [expr {$width_m+4}]
+ set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]]
+ if {$res != $res2} {
+ set diff [compare_lists $res $res2]
+ lappend debuginfo "res: $res, res2: $res2, diff: $diff"
+ fail "place should be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
+ }
+
+ # The width decreases and the height increases. Only the north and south points are found.
+ set width_new [expr {$width_m-4}]
+ set height_new [expr {$height_m+4}]
+ set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]]
+ if {$res != {place:north place:south}} {
+ lappend debuginfo "res: $res"
+ fail "place should not be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
+ }
+
+ # The width increases and the height decreases. Only the east and west points are found.
+ set width_new [expr {$width_m+4}]
+ set height_new [expr {$height_m-4}]
+ set res [lsort [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]]
+ if {$res != {place:east place:west}} {
+ lappend debuginfo "res: $res"
+ fail "place should not be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
+ }
+
+ # Make the box smaller by two meters in each direction to put the coordinates slightly outside the box.
+ set height_new [expr {$height_m-4}]
+ set width_new [expr {$width_m-4}]
+ set res [r geosearch mypoints fromlonlat $search_lon $search_lat bybox $width_new $height_new m]
+ if {$res != ""} {
+ lappend debuginfo "res: $res"
+ fail "place should not be found, debuginfo: $debuginfo, height_new: $height_new width_new: $width_new"
+ }
+ unset -nocomplain debuginfo
+ }
+ }
+}
diff --git a/tests/unit/hyperloglog.tcl b/tests/unit/hyperloglog.tcl
new file mode 100644
index 0000000..ee43718
--- /dev/null
+++ b/tests/unit/hyperloglog.tcl
@@ -0,0 +1,271 @@
+start_server {tags {"hll"}} {
+ test {HyperLogLog self test passes} {
+ catch {r pfselftest} e
+ set e
+ } {OK} {needs:pfdebug}
+
+ test {PFADD without arguments creates an HLL value} {
+ r pfadd hll
+ r exists hll
+ } {1}
+
+ test {Approximated cardinality after creation is zero} {
+ r pfcount hll
+ } {0}
+
+ test {PFADD returns 1 when at least 1 reg was modified} {
+ r pfadd hll a b c
+ } {1}
+
+ test {PFADD returns 0 when no reg was modified} {
+ r pfadd hll a b c
+ } {0}
+
+ test {PFADD works with empty string (regression)} {
+ r pfadd hll ""
+ }
+
+ # Note that the self test stresses the cardinality
+ # estimation error much better. Here we are testing just
+ # the command implementation itself.
+ test {PFCOUNT returns approximated cardinality of set} {
+ r del hll
+ set res {}
+ r pfadd hll 1 2 3 4 5
+ lappend res [r pfcount hll]
+ # Call it again to test cached value invalidation.
+ r pfadd hll 6 7 8 8 9 10
+ lappend res [r pfcount hll]
+ set res
+ } {5 10}
+
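+ # A sparse HLL stores run-length encoded registers and is promoted to
+ # the fixed-size dense representation (16384 six-bit registers, ~12 KB
+ # plus the 16-byte header) once it grows past hll-sparse-max-bytes.
+ # This is a short summary; hyperloglog.c has the authoritative details.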
+ test {HyperLogLogs are promoted from sparse to dense} {
+ r del hll
+ r config set hll-sparse-max-bytes 3000
+ set n 0
+ while {$n < 100000} {
+ set elements {}
+ for {set j 0} {$j < 100} {incr j} {lappend elements [expr rand()]}
+ incr n 100
+ r pfadd hll {*}$elements
+ set card [r pfcount hll]
+ set err [expr {abs($card-$n)}]
+ assert {$err < (double($card)/100)*5}
+ if {$n < 1000} {
+ assert {[r pfdebug encoding hll] eq {sparse}}
+ } elseif {$n > 10000} {
+ assert {[r pfdebug encoding hll] eq {dense}}
+ }
+ }
+ } {} {needs:pfdebug}
+
+ test {Change hll-sparse-max-bytes} {
+ r config set hll-sparse-max-bytes 3000
+ r del hll
+ r pfadd hll a b c d e d g h i j k
+ assert {[r pfdebug encoding hll] eq {sparse}}
+ r config set hll-sparse-max-bytes 30
+ r pfadd hll new_element
+ assert {[r pfdebug encoding hll] eq {dense}}
+ } {} {needs:pfdebug}
+
+ test {HyperLogLog promotes to dense correctly with different hll-sparse-max-bytes} {
+ set max(0) 100
+ set max(1) 500
+ set max(2) 3000
+ for {set i 0} {$i < [array size max]} {incr i} {
+ r config set hll-sparse-max-bytes $max($i)
+ r del hll
+ r pfadd hll
+ set len [r strlen hll]
+ while {$len <= $max($i)} {
+ assert {[r pfdebug encoding hll] eq {sparse}}
+ set elements {}
+ for {set j 0} {$j < 10} {incr j} { lappend elements [expr rand()]}
+ r pfadd hll {*}$elements
+ set len [r strlen hll]
+ }
+ assert {[r pfdebug encoding hll] eq {dense}}
+ }
+ } {} {needs:pfdebug}
+
+ test {HyperLogLog sparse encoding stress test} {
+ for {set x 0} {$x < 1000} {incr x} {
+ r del hll1
+ r del hll2
+ set numele [randomInt 100]
+ set elements {}
+ for {set j 0} {$j < $numele} {incr j} {
+ lappend elements [expr rand()]
+ }
+ # Force dense representation of hll2
+ r pfadd hll2
+ r pfdebug todense hll2
+ r pfadd hll1 {*}$elements
+ r pfadd hll2 {*}$elements
+ assert {[r pfdebug encoding hll1] eq {sparse}}
+ assert {[r pfdebug encoding hll2] eq {dense}}
+ # Cardinality estimated should match exactly.
+ assert {[r pfcount hll1] eq [r pfcount hll2]}
+ }
+ } {} {needs:pfdebug}
+
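+ # The corruption tests below poke at the 16-byte HLL header (a sketch,
+ # as of this version): bytes 0-3 hold the "HYLL" magic, byte 4 the
+ # encoding, bytes 5-7 are unused, and bytes 8-15 cache the cardinality
+ # in little endian, with the MSB of byte 15 set while the cache is
+ # stale. The cache invalidation test at the end of this file checks
+ # that MSB ("\x80") directly.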
+ test {Corrupted sparse HyperLogLogs are detected: Additional at tail} {
+ r del hll
+ r pfadd hll a b c
+ r append hll "hello"
+ set e {}
+ catch {r pfcount hll} e
+ set e
+ } {*INVALIDOBJ*}
+
+ test {Corrupted sparse HyperLogLogs are detected: Broken magic} {
+ r del hll
+ r pfadd hll a b c
+ r setrange hll 0 "0123"
+ set e {}
+ catch {r pfcount hll} e
+ set e
+ } {*WRONGTYPE*}
+
+ test {Corrupted sparse HyperLogLogs are detected: Invalid encoding} {
+ r del hll
+ r pfadd hll a b c
+ r setrange hll 4 "x"
+ set e {}
+ catch {r pfcount hll} e
+ set e
+ } {*WRONGTYPE*}
+
+ test {Corrupted dense HyperLogLogs are detected: Wrong length} {
+ r del hll
+ r pfadd hll a b c
+ r setrange hll 4 "\x00"
+ set e {}
+ catch {r pfcount hll} e
+ set e
+ } {*WRONGTYPE*}
+
+ test {Fuzzing dense/sparse encoding: Redis should always detect errors} {
+ for {set j 0} {$j < 1000} {incr j} {
+ r del hll
+ set items {}
+ set numitems [randomInt 3000]
+ for {set i 0} {$i < $numitems} {incr i} {
+ lappend items [expr {rand()}]
+ }
+ r pfadd hll {*}$items
+
+ # Corrupt it in some random way.
+ for {set i 0} {$i < 5} {incr i} {
+ set len [r strlen hll]
+ set pos [randomInt $len]
+ set byte [randstring 1 1 binary]
+ r setrange hll $pos $byte
+ # With 50% probability, stop corrupting further bytes
+ if {rand() < 0.5} break
+ }
+
+ # Use the hyperloglog to check if it crashes
+ # Redis in some way.
+ catch {
+ r pfcount hll
+ }
+ }
+ }
+
+ test {PFADD, PFCOUNT, PFMERGE type checking works} {
+ r set foo{t} bar
+ catch {r pfadd foo{t} 1} e
+ assert_match {*WRONGTYPE*} $e
+ catch {r pfcount foo{t}} e
+ assert_match {*WRONGTYPE*} $e
+ catch {r pfmerge bar{t} foo{t}} e
+ assert_match {*WRONGTYPE*} $e
+ catch {r pfmerge foo{t} bar{t}} e
+ assert_match {*WRONGTYPE*} $e
+ }
+
+ test {PFMERGE results in the cardinality of the union of sets} {
+ r del hll{t} hll1{t} hll2{t} hll3{t}
+ r pfadd hll1{t} a b c
+ r pfadd hll2{t} b c d
+ r pfadd hll3{t} c d e
+ r pfmerge hll{t} hll1{t} hll2{t} hll3{t}
+ r pfcount hll{t}
+ } {5}
+
+ test {PFMERGE on missing source keys will create an empty destkey} {
+ r del sourcekey{t} sourcekey2{t} destkey{t} destkey2{t}
+
+ assert_equal {OK} [r pfmerge destkey{t} sourcekey{t}]
+ assert_equal 1 [r exists destkey{t}]
+ assert_equal 0 [r pfcount destkey{t}]
+
+ assert_equal {OK} [r pfmerge destkey2{t} sourcekey{t} sourcekey2{t}]
+ assert_equal 1 [r exists destkey2{t}]
+ assert_equal 0 [r pfcount destkey2{t}]
+ }
+
+ test {PFMERGE with one empty input key, create an empty destkey} {
+ r del destkey
+ assert_equal {OK} [r pfmerge destkey]
+ assert_equal 1 [r exists destkey]
+ assert_equal 0 [r pfcount destkey]
+ }
+
+ test {PFMERGE with one non-empty input key, dest key is actually one of the source keys} {
+ r del destkey
+ assert_equal 1 [r pfadd destkey a b c]
+ assert_equal {OK} [r pfmerge destkey]
+ assert_equal 1 [r exists destkey]
+ assert_equal 3 [r pfcount destkey]
+ }
+
+ test {PFCOUNT multiple-keys merge returns cardinality of union #1} {
+ r del hll1{t} hll2{t} hll3{t}
+ for {set x 1} {$x < 10000} {incr x} {
+ r pfadd hll1{t} "foo-$x"
+ r pfadd hll2{t} "bar-$x"
+ r pfadd hll3{t} "zap-$x"
+
+ set card [r pfcount hll1{t} hll2{t} hll3{t}]
+ set realcard [expr {$x*3}]
+ set err [expr {abs($card-$realcard)}]
+ assert {$err < (double($card)/100)*5}
+ }
+ }
+
+ test {PFCOUNT multiple-keys merge returns cardinality of union #2} {
+ r del hll1{t} hll2{t} hll3{t}
+ set elements {}
+ for {set x 1} {$x < 10000} {incr x} {
+ for {set j 1} {$j <= 3} {incr j} {
+ set rint [randomInt 20000]
+ r pfadd hll$j{t} $rint
+ lappend elements $rint
+ }
+ }
+ set realcard [llength [lsort -unique $elements]]
+ set card [r pfcount hll1{t} hll2{t} hll3{t}]
+ set err [expr {abs($card-$realcard)}]
+ assert {$err < (double($card)/100)*5}
+ }
+
+ test {PFDEBUG GETREG returns the HyperLogLog raw registers} {
+ r del hll
+ r pfadd hll 1 2 3
+ llength [r pfdebug getreg hll]
+ } {16384} {needs:pfdebug}
+
+ test {PFADD / PFCOUNT cache invalidation works} {
+ r del hll
+ r pfadd hll a b c
+ r pfcount hll
+ assert {[r getrange hll 15 15] eq "\x00"}
+ r pfadd hll a b c
+ assert {[r getrange hll 15 15] eq "\x00"}
+ r pfadd hll 1 2 3
+ assert {[r getrange hll 15 15] eq "\x80"}
+ }
+}
diff --git a/tests/unit/info-command.tcl b/tests/unit/info-command.tcl
new file mode 100644
index 0000000..bc24ed2
--- /dev/null
+++ b/tests/unit/info-command.tcl
@@ -0,0 +1,62 @@
+start_server {tags {"info and its relative command"}} {
+ test "info command with at most one sub command" {
+ foreach arg {"" "all" "default" "everything"} {
+ if {$arg == ""} {
+ set info [r 0 info]
+ } else {
+ set info [r 0 info $arg]
+ }
+
+ assert { [string match "*redis_version*" $info] }
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { [string match "*used_memory*" $info] }
+ if {$arg == "" || $arg == "default"} {
+ assert { ![string match "*rejected_calls*" $info] }
+ } else {
+ assert { [string match "*rejected_calls*" $info] }
+ }
+ }
+ }
+
+ test "info command with one sub-section" {
+ set info [r info cpu]
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+
+ set info [r info sentinel]
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+
+ set info [r info commandSTATS] ;# test case insensitive compare
+ assert { ![string match "*used_memory*" $info] }
+ assert { [string match "*rejected_calls*" $info] }
+ }
+
+ test "info command with multiple sub-sections" {
+ set info [r info cpu sentinel]
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { ![string match "*master_repl_offset*" $info] }
+
+ set info [r info cpu all]
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { [string match "*used_memory*" $info] }
+ assert { [string match "*master_repl_offset*" $info] }
+ assert { [string match "*rejected_calls*" $info] }
+ # check that we didn't get the same info twice
+ assert { ![string match "*used_cpu_user_children*used_cpu_user_children*" $info] }
+
+ set info [r info cpu default]
+ assert { [string match "*used_cpu_user*" $info] }
+ assert { ![string match "*sentinel_tilt*" $info] }
+ assert { [string match "*used_memory*" $info] }
+ assert { [string match "*master_repl_offset*" $info] }
+ assert { ![string match "*rejected_calls*" $info] }
+ # check that we didn't get the same info twice
+ assert { ![string match "*used_cpu_user_children*used_cpu_user_children*" $info] }
+ }
+
+}
diff --git a/tests/unit/info.tcl b/tests/unit/info.tcl
new file mode 100644
index 0000000..8127043
--- /dev/null
+++ b/tests/unit/info.tcl
@@ -0,0 +1,346 @@
+proc cmdstat {cmd} {
+ return [cmdrstat $cmd r]
+}
+
+proc errorstat {cmd} {
+ return [errorrstat $cmd r]
+}
+
+proc latency_percentiles_usec {cmd} {
+ return [latencyrstat_percentiles $cmd r]
+}
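+
+# For reference, the INFO lines these helpers extract look roughly like
+# the following (illustrative samples; exact fields vary by version):
+#   cmdstat_set:calls=2,usec=14,usec_per_call=7.00,rejected_calls=1,failed_calls=0
+#   errorstat_ERR:count=3
+#   latency_percentiles_usec_set:p50=0.5,p99=1.0,p99.9=2.0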
+
+start_server {tags {"info" "external:skip"}} {
+ start_server {} {
+
+ test {latencystats: disable/enable} {
+ r config resetstat
+ r CONFIG SET latency-tracking no
+ r set a b
+ assert_match {} [latency_percentiles_usec set]
+ r CONFIG SET latency-tracking yes
+ r set a b
+ assert_match {*p50=*,p99=*,p99.9=*} [latency_percentiles_usec set]
+ r config resetstat
+ assert_match {} [latency_percentiles_usec set]
+ }
+
+ test {latencystats: configure percentiles} {
+ r config resetstat
+ assert_match {} [latency_percentiles_usec set]
+ r CONFIG SET latency-tracking yes
+ r SET a b
+ r GET a
+ assert_match {*p50=*,p99=*,p99.9=*} [latency_percentiles_usec set]
+ assert_match {*p50=*,p99=*,p99.9=*} [latency_percentiles_usec get]
+ r CONFIG SET latency-tracking-info-percentiles "0.0 50.0 100.0"
+ assert_match [r config get latency-tracking-info-percentiles] {latency-tracking-info-percentiles {0 50 100}}
+ assert_match {*p0=*,p50=*,p100=*} [latency_percentiles_usec set]
+ assert_match {*p0=*,p50=*,p100=*} [latency_percentiles_usec get]
+ r config resetstat
+ assert_match {} [latency_percentiles_usec set]
+ }
+
+ test {latencystats: bad configure percentiles} {
+ r config resetstat
+ set configlatencyline [r config get latency-tracking-info-percentiles]
+ catch {r CONFIG SET latency-tracking-info-percentiles "10.0 50.0 a"} e
+ assert_match {ERR CONFIG SET failed*} $e
+ assert_equal [s total_error_replies] 1
+ assert_match [r config get latency-tracking-info-percentiles] $configlatencyline
+ catch {r CONFIG SET latency-tracking-info-percentiles "10.0 50.0 101.0"} e
+ assert_match {ERR CONFIG SET failed*} $e
+ assert_equal [s total_error_replies] 2
+ assert_match [r config get latency-tracking-info-percentiles] $configlatencyline
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ }
+
+ test {latencystats: blocking commands} {
+ r config resetstat
+ r CONFIG SET latency-tracking yes
+ r CONFIG SET latency-tracking-info-percentiles "50.0 99.0 99.9"
+ set rd [redis_deferring_client]
+ r del list1{t}
+
+ $rd blpop list1{t} 0
+ wait_for_blocked_client
+ r lpush list1{t} a
+ assert_equal [$rd read] {list1{t} a}
+ $rd blpop list1{t} 0
+ wait_for_blocked_client
+ r lpush list1{t} b
+ assert_equal [$rd read] {list1{t} b}
+ assert_match {*p50=*,p99=*,p99.9=*} [latency_percentiles_usec blpop]
+ $rd close
+ }
+
+ test {latencystats: subcommands} {
+ r config resetstat
+ r CONFIG SET latency-tracking yes
+ r CONFIG SET latency-tracking-info-percentiles "50.0 99.0 99.9"
+ r client id
+
+ assert_match {*p50=*,p99=*,p99.9=*} [latency_percentiles_usec client\\|id]
+ assert_match {*p50=*,p99=*,p99.9=*} [latency_percentiles_usec config\\|set]
+ }
+
+ test {latencystats: measure latency} {
+ r config resetstat
+ r CONFIG SET latency-tracking yes
+ r CONFIG SET latency-tracking-info-percentiles "50.0"
+ r DEBUG sleep 0.05
+ r SET k v
+ set latencystatline_debug [latency_percentiles_usec debug]
+ set latencystatline_set [latency_percentiles_usec set]
+ regexp "p50=(.+\..+)" $latencystatline_debug -> p50_debug
+ regexp "p50=(.+\..+)" $latencystatline_set -> p50_set
+ assert {$p50_debug >= 50000}
+ assert {$p50_set >= 0}
+ assert {$p50_debug >= $p50_set}
+ } {} {needs:debug}
+
+ test {errorstats: failed call authentication error} {
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ assert_equal [s total_error_replies] 0
+ catch {r auth k} e
+ assert_match {ERR AUTH*} $e
+ assert_match {*count=1*} [errorstat ERR]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ assert_equal [s total_error_replies] 1
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ }
+
+ test {errorstats: failed call within MULTI/EXEC} {
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ assert_equal [s total_error_replies] 0
+ r multi
+ r set a b
+ r auth a
+ catch {r exec} e
+ assert_match {ERR AUTH*} $e
+ assert_match {*count=1*} [errorstat ERR]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat set]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat exec]
+ assert_equal [s total_error_replies] 1
+
+ # MULTI/EXEC command errors should still be attributed to the command itself
+ catch {r exec} e
+ assert_match {ERR EXEC without MULTI} $e
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat exec]
+ assert_match {*count=2*} [errorstat ERR]
+ assert_equal [s total_error_replies] 2
+ }
+
+ test {errorstats: failed call within LUA} {
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ assert_equal [s total_error_replies] 0
+ catch {r eval {redis.pcall('XGROUP', 'CREATECONSUMER', 's1', 'mygroup', 'consumer') return } 0} e
+ assert_match {*count=1*} [errorstat ERR]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat xgroup\\|createconsumer]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat eval]
+
+ # EVAL command errors should still be attributed to the command itself
+ catch {r eval a} e
+ assert_match {ERR wrong*} $e
+ assert_match {*calls=1,*,rejected_calls=1,failed_calls=0} [cmdstat eval]
+ assert_match {*count=2*} [errorstat ERR]
+ assert_equal [s total_error_replies] 2
+ }
+
+ test {errorstats: failed call NOSCRIPT error} {
+ r config resetstat
+ assert_equal [s total_error_replies] 0
+ assert_match {} [errorstat NOSCRIPT]
+ catch {r evalsha NotValidShaSUM 0} e
+ assert_match {NOSCRIPT*} $e
+ assert_match {*count=1*} [errorstat NOSCRIPT]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat evalsha]
+ assert_equal [s total_error_replies] 1
+ r config resetstat
+ assert_match {} [errorstat NOSCRIPT]
+ }
+
+ test {errorstats: failed call NOGROUP error} {
+ r config resetstat
+ assert_match {} [errorstat NOGROUP]
+ r del mystream
+ r XADD mystream * f v
+ catch {r XGROUP CREATECONSUMER mystream mygroup consumer} e
+ assert_match {NOGROUP*} $e
+ assert_match {*count=1*} [errorstat NOGROUP]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat xgroup\\|createconsumer]
+ r config resetstat
+ assert_match {} [errorstat NOGROUP]
+ }
+
+ test {errorstats: rejected call unknown command} {
+ r config resetstat
+ assert_equal [s total_error_replies] 0
+ assert_match {} [errorstat ERR]
+ catch {r asdf} e
+ assert_match {ERR unknown*} $e
+ assert_match {*count=1*} [errorstat ERR]
+ assert_equal [s total_error_replies] 1
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ }
+
+ test {errorstats: rejected call within MULTI/EXEC} {
+ r config resetstat
+ assert_equal [s total_error_replies] 0
+ assert_match {} [errorstat ERR]
+ r multi
+ catch {r set} e
+ assert_match {ERR wrong number of arguments for 'set' command} $e
+ catch {r exec} e
+ assert_match {EXECABORT*} $e
+ assert_match {*count=1*} [errorstat ERR]
+ assert_match {*count=1*} [errorstat EXECABORT]
+ assert_equal [s total_error_replies] 2
+ assert_match {*calls=0,*,rejected_calls=1,failed_calls=0} [cmdstat set]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat multi]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat exec]
+ assert_equal [s total_error_replies] 2
+ r config resetstat
+ assert_match {} [errorstat ERR]
+ }
+
+ test {errorstats: rejected call due to wrong arity} {
+ r config resetstat
+ assert_equal [s total_error_replies] 0
+ assert_match {} [errorstat ERR]
+ catch {r set k} e
+ assert_match {ERR wrong number of arguments for 'set' command} $e
+ assert_match {*count=1*} [errorstat ERR]
+ assert_match {*calls=0,*,rejected_calls=1,failed_calls=0} [cmdstat set]
+ # ensure that after a rejected command, valid ones are counted properly
+ r set k1 v1
+ r set k2 v2
+ assert_match {calls=2,*,rejected_calls=1,failed_calls=0} [cmdstat set]
+ assert_equal [s total_error_replies] 1
+ }
+
+ test {errorstats: rejected call by OOM error} {
+ r config resetstat
+ assert_equal [s total_error_replies] 0
+ assert_match {} [errorstat OOM]
+ r config set maxmemory 1
+ catch {r set a b} e
+ assert_match {OOM*} $e
+ assert_match {*count=1*} [errorstat OOM]
+ assert_match {*calls=0,*,rejected_calls=1,failed_calls=0} [cmdstat set]
+ assert_equal [s total_error_replies] 1
+ r config resetstat
+ assert_match {} [errorstat OOM]
+ r config set maxmemory 0
+ }
+
+ test {errorstats: rejected call by authorization error} {
+ r config resetstat
+ assert_equal [s total_error_replies] 0
+ assert_match {} [errorstat NOPERM]
+ r ACL SETUSER alice on >p1pp0 ~cached:* +get +info +config
+ r auth alice p1pp0
+ catch {r set a b} e
+ assert_match {NOPERM*} $e
+ assert_match {*count=1*} [errorstat NOPERM]
+ assert_match {*calls=0,*,rejected_calls=1,failed_calls=0} [cmdstat set]
+ assert_equal [s total_error_replies] 1
+ r config resetstat
+ assert_match {} [errorstat NOPERM]
+ r auth default ""
+ }
+
+ test {errorstats: blocking commands} {
+ r config resetstat
+ set rd [redis_deferring_client]
+ $rd client id
+ set rd_id [$rd read]
+ r del list1{t}
+
+ $rd blpop list1{t} 0
+ wait_for_blocked_client
+ r client unblock $rd_id error
+ assert_error {UNBLOCKED*} {$rd read}
+ assert_match {*count=1*} [errorstat UNBLOCKED]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat blpop]
+ assert_equal [s total_error_replies] 1
+ $rd close
+ }
+
+ test {stats: eventloop metrics} {
+ set info1 [r info stats]
+ set cycle1 [getInfoProperty $info1 eventloop_cycles]
+ set el_sum1 [getInfoProperty $info1 eventloop_duration_sum]
+ set cmd_sum1 [getInfoProperty $info1 eventloop_duration_cmd_sum]
+ assert_morethan $cycle1 0
+ assert_morethan $el_sum1 0
+ assert_morethan $cmd_sum1 0
+ after 110 ;# default hz is 10, wait for a cron tick.
+ set info2 [r info stats]
+ set cycle2 [getInfoProperty $info2 eventloop_cycles]
+ set el_sum2 [getInfoProperty $info2 eventloop_duration_sum]
+ set cmd_sum2 [getInfoProperty $info2 eventloop_duration_cmd_sum]
+ if {$::verbose} { puts "eventloop metrics cycle1: $cycle1, cycle2: $cycle2" }
+ assert_morethan $cycle2 $cycle1
+ assert_lessthan $cycle2 [expr $cycle1+10] ;# we expect 2 or 3 cycles here, but allow some tolerance
+ if {$::verbose} { puts "eventloop metrics el_sum1: $el_sum1, el_sum2: $el_sum2" }
+ assert_morethan $el_sum2 $el_sum1
+ assert_lessthan $el_sum2 [expr $el_sum1+30000] ;# we expect roughly 100ms here, but allow some tolerance
+ if {$::verbose} { puts "eventloop metrics cmd_sum1: $cmd_sum1, cmd_sum2: $cmd_sum2" }
+ assert_morethan $cmd_sum2 $cmd_sum1
+ assert_lessthan $cmd_sum2 [expr $cmd_sum1+15000] ;# we expect about tens of ms here, but allow some tolerance
+ }
+
+ test {stats: instantaneous metrics} {
+ r config resetstat
+ after 1600 ;# hz is 10, wait for 16 cron ticks so that the sample array is filled
+ set value [s instantaneous_eventloop_cycles_per_sec]
+ if {$::verbose} { puts "instantaneous metrics instantaneous_eventloop_cycles_per_sec: $value" }
+ assert_morethan $value 0
+ assert_lessthan $value 15 ;# default hz is 10
+ set value [s instantaneous_eventloop_duration_usec]
+ if {$::verbose} { puts "instantaneous metrics instantaneous_eventloop_duration_usec: $value" }
+ assert_morethan $value 0
+ assert_lessthan $value 22000 ;# default hz is 10, so duration < 1000 / 10, allow some tolerance
+ }
+
+ test {stats: debug metrics} {
+ # make sure debug info is hidden
+ set info [r info]
+ assert_equal [getInfoProperty $info eventloop_duration_aof_sum] {}
+ set info_all [r info all]
+ assert_equal [getInfoProperty $info_all eventloop_duration_aof_sum] {}
+
+ set info1 [r info debug]
+
+ set aof1 [getInfoProperty $info1 eventloop_duration_aof_sum]
+ assert {$aof1 >= 0}
+ set cron1 [getInfoProperty $info1 eventloop_duration_cron_sum]
+ assert {$cron1 > 0}
+ set cycle_max1 [getInfoProperty $info1 eventloop_cmd_per_cycle_max]
+ assert {$cycle_max1 > 0}
+ set duration_max1 [getInfoProperty $info1 eventloop_duration_max]
+ assert {$duration_max1 > 0}
+
+ after 110 ;# hz is 10, wait for a cron tick.
+ set info2 [r info debug]
+
+ set aof2 [getInfoProperty $info2 eventloop_duration_aof_sum]
+ assert {$aof2 >= $aof1} ;# AOF is disabled, we expect $aof2 == $aof1, but allow some tolerance.
+ set cron2 [getInfoProperty $info2 eventloop_duration_cron_sum]
+ assert_morethan $cron2 $cron1
+ set cycle_max2 [getInfoProperty $info2 eventloop_cmd_per_cycle_max]
+ assert {$cycle_max2 >= $cycle_max1}
+ set duration_max2 [getInfoProperty $info2 eventloop_duration_max]
+ assert {$duration_max2 >= $duration_max1}
+ }
+
+ }
+}
diff --git a/tests/unit/introspection-2.tcl b/tests/unit/introspection-2.tcl
new file mode 100644
index 0000000..89cb4b1
--- /dev/null
+++ b/tests/unit/introspection-2.tcl
@@ -0,0 +1,245 @@
+proc cmdstat {cmd} {
+ return [cmdrstat $cmd r]
+}
+
+proc getlru {key} {
+ set objinfo [r debug object $key]
+ foreach info $objinfo {
+ set kvinfo [split $info ":"]
+ if {[string compare [lindex $kvinfo 0] "lru"] == 0} {
+ return [lindex $kvinfo 1]
+ }
+ }
+ fail "Can't get LRU info with DEBUG OBJECT"
+}
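+
+# getlru parses DEBUG OBJECT output, which looks roughly like this
+# (illustrative; exact fields depend on the version and encoding):
+#   Value at:0x7f... refcount:1 encoding:embstr serializedlength:4 lru:5693293 lru_seconds_idle:3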
+
+start_server {tags {"introspection"}} {
+ test {The microsecond part of the TIME command will not overflow} {
+ set now [r time]
+ set microseconds [lindex $now 1]
+ assert_morethan $microseconds 0
+ assert_lessthan $microseconds 1000000
+ }
+
+ test {TTL, TYPE and EXISTS do not alter the last access time of a key} {
+ r set foo bar
+ after 3000
+ r ttl foo
+ r type foo
+ r exists foo
+ assert {[r object idletime foo] >= 2}
+ }
+
+ test {TOUCH alters the last access time of a key} {
+ r set foo bar
+ after 3000
+ r touch foo
+ assert {[r object idletime foo] < 2}
+ }
+
+ test {Operations in no-touch mode do not alter the last access time of a key} {
+ r set foo bar
+ r client no-touch on
+ set oldlru [getlru foo]
+ after 1100
+ r get foo
+ set newlru [getlru foo]
+ assert_equal $newlru $oldlru
+ r client no-touch off
+ r get foo
+ set newlru [getlru foo]
+ assert_morethan $newlru $oldlru
+ } {} {needs:debug}
+
+ test {TOUCH returns the number of existing keys specified} {
+ r flushdb
+ r set key1{t} 1
+ r set key2{t} 2
+ r touch key0{t} key1{t} key2{t} key3{t}
+ } 2
+
+ test {command stats for GEOADD} {
+ r config resetstat
+ r GEOADD foo 0 0 bar
+ assert_match {*calls=1,*} [cmdstat geoadd]
+ assert_match {} [cmdstat zadd]
+ } {} {needs:config-resetstat}
+
+ test {errors stats for GEOADD} {
+ r config resetstat
+ # make sure the geo command will fail
+ r set foo 1
+ assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r GEOADD foo 0 0 bar}
+ assert_match {*calls=1*,rejected_calls=0,failed_calls=1*} [cmdstat geoadd]
+ assert_match {} [cmdstat zadd]
+ } {} {needs:config-resetstat}
+
+ test {command stats for EXPIRE} {
+ r config resetstat
+ r SET foo bar
+ r EXPIRE foo 0
+ assert_match {*calls=1,*} [cmdstat expire]
+ assert_match {} [cmdstat del]
+ } {} {needs:config-resetstat}
+
+ test {command stats for BRPOP} {
+ r config resetstat
+ r LPUSH list foo
+ r BRPOP list 0
+ assert_match {*calls=1,*} [cmdstat brpop]
+ assert_match {} [cmdstat rpop]
+ } {} {needs:config-resetstat}
+
+ test {command stats for MULTI} {
+ r config resetstat
+ r MULTI
+ r set foo{t} bar
+ r GEOADD foo2{t} 0 0 bar
+ r EXPIRE foo2{t} 0
+ r EXEC
+ assert_match {*calls=1,*} [cmdstat multi]
+ assert_match {*calls=1,*} [cmdstat exec]
+ assert_match {*calls=1,*} [cmdstat set]
+ assert_match {*calls=1,*} [cmdstat expire]
+ assert_match {*calls=1,*} [cmdstat geoadd]
+ } {} {needs:config-resetstat}
+
+ test {command stats for scripts} {
+ r config resetstat
+ r set mykey myval
+ r eval {
+ redis.call('set', KEYS[1], 0)
+ redis.call('expire', KEYS[1], 0)
+ redis.call('geoadd', KEYS[1], 0, 0, "bar")
+ } 1 mykey
+ assert_match {*calls=1,*} [cmdstat eval]
+ assert_match {*calls=2,*} [cmdstat set]
+ assert_match {*calls=1,*} [cmdstat expire]
+ assert_match {*calls=1,*} [cmdstat geoadd]
+ } {} {needs:config-resetstat}
+
+ test {COMMAND COUNT get total number of Redis commands} {
+ assert_morethan [r command count] 0
+ }
+
+ test {COMMAND GETKEYS GET} {
+ assert_equal {key} [r command getkeys get key]
+ }
+
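+ # A sketch of the key flags asserted below: RO/RW/OW/RM describe how the
+ # command accesses the key (read-only, read-write, overwrite, remove),
+ # while access/update/insert/delete describe the effect on the value.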
+ test {COMMAND GETKEYSANDFLAGS} {
+ assert_equal {{k1 {OW update}}} [r command getkeysandflags set k1 v1]
+ assert_equal {{k1 {OW update}} {k2 {OW update}}} [r command getkeysandflags mset k1 v1 k2 v2]
+ assert_equal {{k1 {RW access delete}} {k2 {RW insert}}} [r command getkeysandflags LMOVE k1 k2 left right]
+ assert_equal {{k1 {RO access}} {k2 {OW update}}} [r command getkeysandflags sort k1 store k2]
+ }
+
+ test {COMMAND GETKEYS MEMORY USAGE} {
+ assert_equal {key} [r command getkeys memory usage key]
+ }
+
+ test {COMMAND GETKEYS XGROUP} {
+ assert_equal {key} [r command getkeys xgroup create key groupname $]
+ }
+
+ test {COMMAND GETKEYS EVAL with keys} {
+ assert_equal {key} [r command getkeys eval "return 1" 1 key]
+ }
+
+ test {COMMAND GETKEYS EVAL without keys} {
+ assert_equal {} [r command getkeys eval "return 1" 0]
+ }
+
+ test {COMMAND GETKEYS LCS} {
+ assert_equal {key1 key2} [r command getkeys lcs key1 key2]
+ }
+
+ test {COMMAND GETKEYS MORE THAN 256 KEYS} {
+ set all_keys [list]
+ set numkeys 260
+ for {set i 1} {$i <= $numkeys} {incr i} {
+ lappend all_keys "key$i"
+ }
+ set all_keys_with_target [linsert $all_keys 0 target]
+ # We use ZUNIONSTORE because reproducing the allocation of a new buffer
+ # in getKeysPrepareResult (when the result already has numkeys > 0)
+ # requires a command whose final number of keys is not known on the
+ # first call. Before the fix, the old buffer's contents were not copied
+ # into the new result buffer, leaving the data of all previously found
+ # keys (numkeys) uninitialized.
+ assert_equal $all_keys_with_target [r command getkeys ZUNIONSTORE target $numkeys {*}$all_keys]
+ }
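+
+ # A small boundary sketch (not from the original suite): the pre-allocated
+ # GETKEYS buffer mentioned above holds 256 keys, so the result should also
+ # be correct right at that boundary.
+ test {COMMAND GETKEYS ZUNIONSTORE with exactly 256 keys} {
+ set keys [list]
+ for {set i 1} {$i <= 256} {incr i} {
+ lappend keys "key$i"
+ }
+ assert_equal [linsert $keys 0 target] [r command getkeys ZUNIONSTORE target 256 {*}$keys]
+ }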
+
+ test "COMMAND LIST syntax error" {
+ assert_error "ERR syntax error*" {r command list bad_arg}
+ assert_error "ERR syntax error*" {r command list filterby bad_arg}
+ assert_error "ERR syntax error*" {r command list filterby bad_arg bad_arg2}
+ }
+
+ test "COMMAND LIST WITHOUT FILTERBY" {
+ set commands [r command list]
+ assert_not_equal [lsearch $commands "set"] -1
+ assert_not_equal [lsearch $commands "client|list"] -1
+ }
+
+ test "COMMAND LIST FILTERBY ACLCAT against non existing category" {
+ assert_equal {} [r command list filterby aclcat non_existing_category]
+ }
+
+ test "COMMAND LIST FILTERBY ACLCAT - list all commands/subcommands" {
+ set commands [r command list filterby aclcat scripting]
+ assert_not_equal [lsearch $commands "eval"] -1
+ assert_not_equal [lsearch $commands "script|kill"] -1
+
+ # Negative check, a command that should not be here
+ assert_equal [lsearch $commands "set"] -1
+ }
+
+ test "COMMAND LIST FILTERBY PATTERN - list all commands/subcommands" {
+ # Exact command match.
+ assert_equal {set} [r command list filterby pattern set]
+ assert_equal {get} [r command list filterby pattern get]
+
+ # Return the parent command and all the subcommands below it.
+ set commands [r command list filterby pattern config*]
+ assert_not_equal [lsearch $commands "config"] -1
+ assert_not_equal [lsearch $commands "config|get"] -1
+
+ # We can filter subcommands under a parent command.
+ set commands [r command list filterby pattern config|*re*]
+ assert_not_equal [lsearch $commands "config|resetstat"] -1
+ assert_not_equal [lsearch $commands "config|rewrite"] -1
+
+ # We can filter subcommands across parent commands.
+ set commands [r command list filterby pattern cl*help]
+ assert_not_equal [lsearch $commands "client|help"] -1
+ assert_not_equal [lsearch $commands "cluster|help"] -1
+
+ # Negative check, command that doesn't exist.
+ assert_equal {} [r command list filterby pattern non_exists]
+ assert_equal {} [r command list filterby pattern non_exists*]
+ }
+
+ test "COMMAND LIST FILTERBY MODULE against non existing module" {
+ # This should be empty, the real one is in subcommands.tcl
+ assert_equal {} [r command list filterby module non_existing_module]
+ }
+
+ test {COMMAND INFO of invalid subcommands} {
+ assert_equal {{}} [r command info get|key]
+ assert_equal {{}} [r command info config|get|key]
+ }
+
+ foreach cmd {SET GET MSET BITFIELD LMOVE LPOP BLPOP PING MEMORY MEMORY|USAGE RENAME GEORADIUS_RO} {
+ test "$cmd command will not be marked with movablekeys" {
+ set info [lindex [r command info $cmd] 0]
+ assert_no_match {*movablekeys*} [lindex $info 2]
+ }
+ }
+
+ foreach cmd {ZUNIONSTORE XREAD EVAL SORT SORT_RO MIGRATE GEORADIUS} {
+ test "$cmd command is marked with movablekeys" {
+ set info [lindex [r command info $cmd] 0]
+ assert_match {*movablekeys*} [lindex $info 2]
+ }
+ }
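+
+ # A companion sketch (not from the original suite): for movablekeys
+ # commands the static key spec alone is not enough, so clients are
+ # expected to ask the server via COMMAND GETKEYS, e.g. for SORT ... STORE:
+ test {COMMAND GETKEYS resolves movablekeys commands (SORT STORE)} {
+ assert_equal {mylist dst} [r command getkeys sort mylist store dst]
+ }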
+
+}
diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl
new file mode 100644
index 0000000..8132ee1
--- /dev/null
+++ b/tests/unit/introspection.tcl
@@ -0,0 +1,829 @@
+start_server {tags {"introspection"}} {
+ test "PING" {
+ assert_equal {PONG} [r ping]
+ assert_equal {redis} [r ping redis]
+ assert_error {*wrong number of arguments for 'ping' command} {r ping hello redis}
+ }
+
+ test {CLIENT LIST} {
+ r client list
+ } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=*}
+
+ test {CLIENT LIST with IDs} {
+ set myid [r client id]
+ set cl [split [r client list id $myid] "\r\n"]
+ assert_match "id=$myid * cmd=client|list *" [lindex $cl 0]
+ }
+
+ test {CLIENT INFO} {
+ r client info
+ } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 ssub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|info user=* redir=-1 resp=*}
+
+ test {CLIENT KILL with illegal arguments} {
+ assert_error "ERR wrong number of arguments for 'client|kill' command" {r client kill}
+ assert_error "ERR syntax error*" {r client kill id 10 wrong_arg}
+
+ assert_error "ERR *greater than 0*" {r client kill id str}
+ assert_error "ERR *greater than 0*" {r client kill id -1}
+ assert_error "ERR *greater than 0*" {r client kill id 0}
+
+ assert_error "ERR Unknown client type*" {r client kill type wrong_type}
+
+ assert_error "ERR No such user*" {r client kill user wrong_user}
+
+ assert_error "ERR syntax error*" {r client kill skipme yes_or_no}
+ }
+
+ test {CLIENT KILL SKIPME YES/NO will kill all clients} {
+ # Kill all clients except `me`
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set connected_clients [s connected_clients]
+ assert {$connected_clients >= 3}
+ set res [r client kill skipme yes]
+ assert {$res == $connected_clients - 1}
+
+ # Kill all clients, including `me`
+ set rd3 [redis_deferring_client]
+ set rd4 [redis_deferring_client]
+ set connected_clients [s connected_clients]
+ assert {$connected_clients == 3}
+ set res [r client kill skipme no]
+ assert_equal $res $connected_clients
+
+ # After killing `me`, the first ping will throw an error
+ assert_error "*I/O error*" {r ping}
+ assert_equal "PONG" [r ping]
+
+ $rd1 close
+ $rd2 close
+ $rd3 close
+ $rd4 close
+ }
+
+ test "CLIENT KILL close the client connection during bgsave" {
+ # Start a slow bgsave, trigger an active fork.
+ r flushall
+ r set k v
+ r config set rdb-key-save-delay 10000000
+ r bgsave
+ wait_for_condition 1000 10 {
+ [s rdb_bgsave_in_progress] eq 1
+ } else {
+ fail "bgsave did not start in time"
+ }
+
+ # Kill (close) the connection
+ r client kill skipme no
+
+ # In the past, client connections needed to wait for bgsave to end
+ # before actually closing; now they are closed immediately.
+ assert_error "*I/O error*" {r ping} ;# get the error very quickly
+ assert_equal "PONG" [r ping]
+
+ # Make sure the bgsave is still in progress
+ assert_equal [s rdb_bgsave_in_progress] 1
+
+ # Stop the child before we proceed to the next test
+ r config set rdb-key-save-delay 0
+ r flushall
+ wait_for_condition 1000 10 {
+ [s rdb_bgsave_in_progress] eq 0
+ } else {
+ fail "bgsave did not stop in time"
+ }
+ } {} {needs:save}
+
+ test "CLIENT REPLY OFF/ON: disable all commands reply" {
+ set rd [redis_deferring_client]
+
+ # These replies were silenced.
+ $rd client reply off
+ $rd ping pong
+ $rd ping pong2
+
+ $rd client reply on
+ assert_equal {OK} [$rd read]
+ $rd ping pong3
+ assert_equal {pong3} [$rd read]
+
+ $rd close
+ }
+
+ test "CLIENT REPLY SKIP: skip the next command reply" {
+ set rd [redis_deferring_client]
+
+ # The first pong reply was silenced.
+ $rd client reply skip
+ $rd ping pong
+
+ $rd ping pong2
+ assert_equal {pong2} [$rd read]
+
+ $rd close
+ }
+
+ test "CLIENT REPLY ON: unset SKIP flag" {
+ set rd [redis_deferring_client]
+
+ $rd client reply skip
+ $rd client reply on
+ assert_equal {OK} [$rd read] ;# OK from CLIENT REPLY ON command
+
+ $rd ping
+ assert_equal {PONG} [$rd read]
+
+ $rd close
+ }
+
+ test {MONITOR can log executed commands} {
+ set rd [redis_deferring_client]
+ $rd monitor
+ assert_match {*OK*} [$rd read]
+ r set foo bar
+ r get foo
+ set res [list [$rd read] [$rd read]]
+ $rd close
+ set _ $res
+ } {*"set" "foo"*"get" "foo"*}
+
+ test {MONITOR can log commands issued by the scripting engine} {
+ set rd [redis_deferring_client]
+ $rd monitor
+ $rd read ;# Discard the OK
+ r eval {redis.call('set',KEYS[1],ARGV[1])} 1 foo bar
+ assert_match {*eval*} [$rd read]
+ assert_match {*lua*"set"*"foo"*"bar"*} [$rd read]
+ $rd close
+ }
+
+ test {MONITOR can log commands issued by functions} {
+ r function load replace {#!lua name=test
+ redis.register_function('test', function() return redis.call('set', 'foo', 'bar') end)
+ }
+ set rd [redis_deferring_client]
+ $rd monitor
+ $rd read ;# Discard the OK
+ r fcall test 0
+ assert_match {*fcall*test*} [$rd read]
+ assert_match {*lua*"set"*"foo"*"bar"*} [$rd read]
+ $rd close
+ }
+
+ test {MONITOR supports redacting command arguments} {
+ set rd [redis_deferring_client]
+ $rd monitor
+ $rd read ; # Discard the OK
+
+ r migrate [srv 0 host] [srv 0 port] key 9 5000
+ r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH user
+ r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH2 user password
+ catch {r auth not-real} _
+ catch {r auth not-real not-a-password} _
+
+ assert_match {*"key"*"9"*"5000"*} [$rd read]
+ assert_match {*"key"*"9"*"5000"*"(redacted)"*} [$rd read]
+ assert_match {*"key"*"9"*"5000"*"(redacted)"*"(redacted)"*} [$rd read]
+ assert_match {*"auth"*"(redacted)"*} [$rd read]
+ assert_match {*"auth"*"(redacted)"*"(redacted)"*} [$rd read]
+
+ foreach resp {3 2} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$resp == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$resp == 2} {continue}
+ }
+ catch {r hello $resp AUTH not-real not-a-password} _
+ assert_match "*\"hello\"*\"$resp\"*\"AUTH\"*\"(redacted)\"*\"(redacted)\"*" [$rd read]
+ }
+ $rd close
+ } {0} {needs:repl}
+
+ test {MONITOR correctly handles multi-exec cases} {
+ set rd [redis_deferring_client]
+ $rd monitor
+ $rd read ; # Discard the OK
+
+ # Make sure multi-exec statements are ordered
+ # correctly
+ r multi
+ r set foo bar
+ r exec
+ assert_match {*"multi"*} [$rd read]
+ assert_match {*"set"*"foo"*"bar"*} [$rd read]
+ assert_match {*"exec"*} [$rd read]
+
+ # Make sure we close multi statements on errors
+ r multi
+ catch {r syntax error} _
+ catch {r exec} _
+
+ assert_match {*"multi"*} [$rd read]
+ assert_match {*"exec"*} [$rd read]
+
+ $rd close
+ }
+
+ test {MONITOR log blocked command only once} {
+
+ # need to reconnect in order to reset the clients state
+ reconnect
+
+ set rd [redis_deferring_client]
+ set bc [redis_deferring_client]
+ r del mylist
+
+ $rd monitor
+ $rd read ; # Discard the OK
+
+ $bc blpop mylist 0
+ wait_for_blocked_clients_count 1
+ r lpush mylist 1
+ wait_for_blocked_clients_count 0
+ r lpush mylist 2
+
+ # we expect to see the blpop on the monitor first
+ assert_match {*"blpop"*"mylist"*"0"*} [$rd read]
+
+ # we scan out all the info commands on the monitor
+ set monitor_output [$rd read]
+ while { [string match {*"info"*} $monitor_output] } {
+ set monitor_output [$rd read]
+ }
+
+ # we expect to locate the lpush right when the client was unblocked
+ assert_match {*"lpush"*"mylist"*"1"*} $monitor_output
+
+ # we scan out all the info commands
+ set monitor_output [$rd read]
+ while { [string match {*"info"*} $monitor_output] } {
+ set monitor_output [$rd read]
+ }
+
+ # we expect to see the next lpush and not duplicate blpop command
+ assert_match {*"lpush"*"mylist"*"2"*} $monitor_output
+
+ $rd close
+ $bc close
+ }
+
+ test {CLIENT GETNAME should return NIL if name is not assigned} {
+ r client getname
+ } {}
+
+ test {CLIENT LIST shows empty fields for unassigned names} {
+ r client list
+ } {*name= *}
+
+ test {CLIENT SETNAME does not accept spaces} {
+ catch {r client setname "foo bar"} e
+ set e
+ } {ERR*}
+
+ test {CLIENT SETNAME can assign a name to this connection} {
+ assert_equal [r client setname myname] {OK}
+ r client list
+ } {*name=myname*}
+
+ test {CLIENT SETNAME can change the name of an existing connection} {
+ assert_equal [r client setname someothername] {OK}
+ r client list
+ } {*name=someothername*}
+
+ test {After CLIENT SETNAME, connection can still be closed} {
+ set rd [redis_deferring_client]
+ $rd client setname foobar
+ assert_equal [$rd read] "OK"
+ assert_match {*foobar*} [r client list]
+ $rd close
+ # Now the client should no longer be listed
+ wait_for_condition 50 100 {
+ [string match {*foobar*} [r client list]] == 0
+ } else {
+ fail "Client still listed in CLIENT LIST after SETNAME."
+ }
+ }
+
+ test {CLIENT SETINFO can set a library name to this connection} {
+ r CLIENT SETINFO lib-name redis.py
+ r CLIENT SETINFO lib-ver 1.2.3
+ r client info
+ } {*lib-name=redis.py lib-ver=1.2.3*}
+
+ test {CLIENT SETINFO invalid args} {
+ assert_error {*wrong number of arguments*} {r CLIENT SETINFO lib-name}
+ assert_error {*cannot contain spaces*} {r CLIENT SETINFO lib-name "redis py"}
+ assert_error {*newlines*} {r CLIENT SETINFO lib-name "redis.py\n"}
+ assert_error {*Unrecognized*} {r CLIENT SETINFO badger hamster}
+ # test that all of these didn't affect the previously set values
+ r client info
+ } {*lib-name=redis.py lib-ver=1.2.3*}
+
+ test {RESET does NOT clean library name} {
+ r reset
+ r client info
+ } {*lib-name=redis.py*} {needs:reset}
+
+ test {CLIENT SETINFO can clear library name} {
+ r CLIENT SETINFO lib-name ""
+ r client info
+ } {*lib-name= *}
+
+ test {CONFIG save params special case handled properly} {
+ # No "save" keyword - defaults should apply
+ start_server {config "minimal.conf"} {
+ assert_match [r config get save] {save {3600 1 300 100 60 10000}}
+ }
+
+ # First "save" keyword overrides hard coded defaults
+ start_server {config "minimal.conf" overrides {save {100 100}}} {
+ # Defaults
+ assert_match [r config get save] {save {100 100}}
+ }
+
+ # First "save" keyword appends default from config file
+ start_server {config "default.conf" overrides {save {900 1}} args {--save 100 100}} {
+ assert_match [r config get save] {save {900 1 100 100}}
+ }
+
+ # Empty "save" keyword resets all
+ start_server {config "default.conf" overrides {save {900 1}} args {--save {}}} {
+ assert_match [r config get save] {save {}}
+ }
+ } {} {external:skip}
+
+ test {CONFIG sanity} {
+ # Do CONFIG GET, CONFIG SET and then CONFIG GET again
+ # Skip immutable configs, one with no get, and other complicated configs
+ set skip_configs {
+ rdbchecksum
+ daemonize
+ io-threads-do-reads
+ tcp-backlog
+ always-show-logo
+ syslog-enabled
+ cluster-enabled
+ disable-thp
+ aclfile
+ unixsocket
+ pidfile
+ syslog-ident
+ appendfilename
+ appenddirname
+ supervised
+ syslog-facility
+ databases
+ io-threads
+ logfile
+ unixsocketperm
+ replicaof
+ slaveof
+ requirepass
+ server_cpulist
+ bio_cpulist
+ aof_rewrite_cpulist
+ bgsave_cpulist
+ set-proc-title
+ cluster-config-file
+ cluster-port
+ oom-score-adj
+ oom-score-adj-values
+ enable-protected-configs
+ enable-debug-command
+ enable-module-command
+ dbfilename
+ logfile
+ dir
+ socket-mark-id
+ req-res-logfile
+ client-default-resp
+ }
+
+ if {!$::tls} {
+ append skip_configs {
+ tls-prefer-server-ciphers
+ tls-session-cache-timeout
+ tls-session-cache-size
+ tls-session-caching
+ tls-cert-file
+ tls-key-file
+ tls-client-cert-file
+ tls-client-key-file
+ tls-dh-params-file
+ tls-ca-cert-file
+ tls-ca-cert-dir
+ tls-protocols
+ tls-ciphers
+ tls-ciphersuites
+ tls-port
+ }
+ }
+
+ set configs {}
+ foreach {k v} [r config get *] {
+ if {[lsearch $skip_configs $k] != -1} {
+ continue
+ }
+ dict set configs $k $v
+ # try to set the config to the same value it already has
+ r config set $k $v
+ }
+
+ set newconfigs {}
+ foreach {k v} [r config get *] {
+ if {[lsearch $skip_configs $k] != -1} {
+ continue
+ }
+ dict set newconfigs $k $v
+ }
+
+ dict for {k v} $configs {
+ set vv [dict get $newconfigs $k]
+ if {$v != $vv} {
+ fail "config $k mismatch, expecting $v but got $vv"
+ }
+
+ }
+ }
+
+ # Do a force-all config rewrite and make sure we're able to parse
+ # it.
+ test {CONFIG REWRITE sanity} {
+ # Capture state of config before
+ set configs {}
+ foreach {k v} [r config get *] {
+ dict set configs $k $v
+ }
+
+ # Rewrite entire configuration, restart and confirm the
+ # server is able to parse it and start.
+ assert_equal [r debug config-rewrite-force-all] "OK"
+ restart_server 0 true false
+ wait_done_loading r
+
+ # Verify no changes were introduced
+ dict for {k v} $configs {
+ assert_equal $v [lindex [r config get $k] 1]
+ }
+ } {} {external:skip}
+
+ test {CONFIG REWRITE handles save and shutdown properly} {
+ r config set save "3600 1 300 100 60 10000"
+ r config set shutdown-on-sigterm "nosave now"
+ r config set shutdown-on-sigint "save"
+ r config rewrite
+ restart_server 0 true false
+ assert_equal [r config get save] {save {3600 1 300 100 60 10000}}
+ assert_equal [r config get shutdown-on-sigterm] {shutdown-on-sigterm {nosave now}}
+ assert_equal [r config get shutdown-on-sigint] {shutdown-on-sigint save}
+
+ r config set save ""
+ r config set shutdown-on-sigterm "default"
+ r config rewrite
+ restart_server 0 true false
+ assert_equal [r config get save] {save {}}
+ assert_equal [r config get shutdown-on-sigterm] {shutdown-on-sigterm default}
+
+ start_server {config "minimal.conf"} {
+ assert_equal [r config get save] {save {3600 1 300 100 60 10000}}
+ r config set save ""
+ r config rewrite
+ restart_server 0 true false
+ assert_equal [r config get save] {save {}}
+ }
+ } {} {external:skip}
+
+ test {CONFIG SET with multiple args} {
+ set some_configs {maxmemory 10000001 repl-backlog-size 10000002 save {3000 5}}
+
+ # Backup
+ set backups {}
+ foreach c [dict keys $some_configs] {
+ lappend backups $c [lindex [r config get $c] 1]
+ }
+
+ # Multi config set and verify
+ assert_equal [eval "r config set $some_configs"] "OK"
+ dict for {c val} $some_configs {
+ assert_equal [lindex [r config get $c] 1] $val
+ }
+
+ # Restore backup
+ assert_equal [eval "r config set $backups"] "OK"
+ }
+
+ test {CONFIG SET rollback on set error} {
+ # This test passes an invalid percent value to maxmemory-clients which should cause an
+ # input verification failure during the "set" phase before trying to apply the
+ # configuration. We want to make sure the correct failure happens and everything
+ # is rolled back.
+ # backup maxmemory config
+ set mm_backup [lindex [r config get maxmemory] 1]
+ set mmc_backup [lindex [r config get maxmemory-clients] 1]
+ set qbl_backup [lindex [r config get client-query-buffer-limit] 1]
+ # Set some value to maxmemory
+ assert_equal [r config set maxmemory 10000002] "OK"
+ # Set another value to maxmemory together with other invalid configs
+ assert_error "ERR CONFIG SET failed (possibly related to argument 'maxmemory-clients') - percentage argument must be less or equal to 100" {
+ r config set maxmemory 10000001 maxmemory-clients 200% client-query-buffer-limit invalid
+ }
+ # Validate we rolled back to original values
+ assert_equal [lindex [r config get maxmemory] 1] 10000002
+ assert_equal [lindex [r config get maxmemory-clients] 1] $mmc_backup
+ assert_equal [lindex [r config get client-query-buffer-limit] 1] $qbl_backup
+ # Make sure we revert back to the previous maxmemory
+ assert_equal [r config set maxmemory $mm_backup] "OK"
+ }
+
+ test {CONFIG SET rollback on apply error} {
+ # This test tries to configure a used port number in redis. This is expected
+ # to pass the `CONFIG SET` validity checking implementation but fail on
+ # actual "apply" of the setting. This will validate that after an "apply"
+ # failure we rollback to the previous values.
+ proc dummy_accept {chan addr port} {}
+
+ set some_configs {maxmemory 10000001 port 0 client-query-buffer-limit 10m}
+
+ # On Linux we also set the oom score adj which has an apply function. This is
+ # used to verify that even successful applies are rolled back if some other
+ # config's apply fails.
+ set oom_adj_avail [expr {!$::external && [exec uname] == "Linux"}]
+ if {$oom_adj_avail} {
+ proc get_oom_score_adj {} {
+ set pid [srv 0 pid]
+ set fd [open "/proc/$pid/oom_score_adj" "r"]
+ set val [gets $fd]
+ close $fd
+ return $val
+ }
+ set some_configs [linsert $some_configs 0 oom-score-adj yes oom-score-adj-values {1 1 1}]
+ set read_oom_adj [get_oom_score_adj]
+ }
+
+ # Backup
+ set backups {}
+ foreach c [dict keys $some_configs] {
+ lappend backups $c [lindex [r config get $c] 1]
+ }
+
+ set used_port [find_available_port $::baseport $::portcount]
+ dict set some_configs port $used_port
+
+ # Run a dummy server on used_port so we know we can't configure redis to
+ # use it. It's ok for this to fail because that means used_port is invalid
+ # anyway
+ catch {socket -server dummy_accept -myaddr 127.0.0.1 $used_port} e
+ if {$::verbose} { puts "dummy_accept: $e" }
+
+ # Try to listen on the used port, pass some more configs to make sure the
+ # returned failure message is for the first bad config and everything is rolled back.
+ assert_error "ERR CONFIG SET failed (possibly related to argument 'port') - Unable to listen on this port*" {
+ eval "r config set $some_configs"
+ }
+
+ # Make sure we reverted back to previous configs
+ dict for {conf val} $backups {
+ assert_equal [lindex [r config get $conf] 1] $val
+ }
+
+ if {$oom_adj_avail} {
+ assert_equal [get_oom_score_adj] $read_oom_adj
+ }
+
+ # Make sure we can still communicate with the server (on the original port)
+ set r1 [redis_client]
+ assert_equal [$r1 ping] "PONG"
+ $r1 close
+ }
+
+ test {CONFIG SET duplicate configs} {
+ assert_error "ERR *duplicate*" {r config set maxmemory 10000001 maxmemory 10000002}
+ }
+
+ test {CONFIG SET set immutable} {
+ assert_error "ERR *immutable*" {r config set daemonize yes}
+ }
+
+ test {CONFIG GET hidden configs} {
+ set hidden_config "key-load-delay"
+
+ # When we use a pattern we shouldn't get the hidden config
+ assert {![dict exists [r config get *] $hidden_config]}
+
+ # When we explicitly request the hidden config we should get it
+ assert {[dict exists [r config get $hidden_config] "$hidden_config"]}
+ }
+
+ test {CONFIG GET multiple args} {
+ set res [r config get maxmemory maxmemory* bind *of]
+
+ # Verify there are no duplicates in the result
+ assert_equal [expr [llength [dict keys $res]]*2] [llength $res]
+
+ # Verify we got both name and alias in result
+ assert {[dict exists $res slaveof] && [dict exists $res replicaof]}
+
+ # Verify pattern found multiple maxmemory* configs
+ assert {[dict exists $res maxmemory] && [dict exists $res maxmemory-samples] && [dict exists $res maxmemory-clients]}
+
+ # Verify we also got the explicit config
+ assert {[dict exists $res bind]}
+ }
+
+ test {redis-server command line arguments - error cases} {
+ # Take '--invalid' as the option.
+ catch {exec src/redis-server --invalid} err
+ assert_match {*Bad directive or wrong number of arguments*} $err
+
+ catch {exec src/redis-server --port} err
+ assert_match {*'port'*wrong number of arguments*} $err
+
+ catch {exec src/redis-server --port 6380 --loglevel} err
+ assert_match {*'loglevel'*wrong number of arguments*} $err
+
+ # Take `6379` and `6380` as the port option value.
+ catch {exec src/redis-server --port 6379 6380} err
+ assert_match {*'port "6379" "6380"'*wrong number of arguments*} $err
+
+ # Take `--loglevel` and `verbose` as the port option value.
+ catch {exec src/redis-server --port --loglevel verbose} err
+ assert_match {*'port "--loglevel" "verbose"'*wrong number of arguments*} $err
+
+ # Take `--bla` as the port option value.
+ catch {exec src/redis-server --port --bla --loglevel verbose} err
+ assert_match {*'port "--bla"'*argument couldn't be parsed into an integer*} $err
+
+ # Take `--bla` as the loglevel option value.
+ catch {exec src/redis-server --logfile --my--log--file --loglevel --bla} err
+ assert_match {*'loglevel "--bla"'*argument(s) must be one of the following*} $err
+
+ # Use MULTI_ARG's own check for an empty option value
+ catch {exec src/redis-server --shutdown-on-sigint} err
+ assert_match {*'shutdown-on-sigint'*argument(s) must be one of the following*} $err
+ catch {exec src/redis-server --shutdown-on-sigint "now force" --shutdown-on-sigterm} err
+ assert_match {*'shutdown-on-sigterm'*argument(s) must be one of the following*} $err
+
+ # Something like `redis-server --some-config --config-value1 --config-value2 --loglevel debug` would break,
+ # because a value that starts with `--` can only be passed to a config as a single value.
+ catch {exec src/redis-server --replicaof 127.0.0.1 abc} err
+ assert_match {*'replicaof "127.0.0.1" "abc"'*Invalid master port*} $err
+ catch {exec src/redis-server --replicaof --127.0.0.1 abc} err
+ assert_match {*'replicaof "--127.0.0.1" "abc"'*Invalid master port*} $err
+ catch {exec src/redis-server --replicaof --127.0.0.1 --abc} err
+ assert_match {*'replicaof "--127.0.0.1"'*wrong number of arguments*} $err
+ } {} {external:skip}
+
+ test {redis-server command line arguments - allow passing option name and option value in the same arg} {
+ start_server {config "default.conf" args {"--maxmemory 700mb" "--maxmemory-policy volatile-lru"}} {
+ assert_match [r config get maxmemory] {maxmemory 734003200}
+ assert_match [r config get maxmemory-policy] {maxmemory-policy volatile-lru}
+ }
+ } {} {external:skip}
+
+ test {redis-server command line arguments - wrong usage that we support anyway} {
+ start_server {config "default.conf" args {loglevel verbose "--maxmemory '700mb'" "--maxmemory-policy 'volatile-lru'"}} {
+ assert_match [r config get loglevel] {loglevel verbose}
+ assert_match [r config get maxmemory] {maxmemory 734003200}
+ assert_match [r config get maxmemory-policy] {maxmemory-policy volatile-lru}
+ }
+ } {} {external:skip}
+
+ test {redis-server command line arguments - allow option value to use the `--` prefix} {
+ start_server {config "default.conf" args {--proc-title-template --my--title--template --loglevel verbose}} {
+ assert_match [r config get proc-title-template] {proc-title-template --my--title--template}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+ } {} {external:skip}
+
+ test {redis-server command line arguments - option name and option value in the same arg and `--` prefix} {
+ start_server {config "default.conf" args {"--proc-title-template --my--title--template" "--loglevel verbose"}} {
+ assert_match [r config get proc-title-template] {proc-title-template --my--title--template}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+ } {} {external:skip}
+
+ test {redis-server command line arguments - save with empty input} {
+ start_server {config "default.conf" args {--save --loglevel verbose}} {
+ assert_match [r config get save] {save {}}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+
+ start_server {config "default.conf" args {--loglevel verbose --save}} {
+ assert_match [r config get save] {save {}}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+
+ start_server {config "default.conf" args {--save {} --loglevel verbose}} {
+ assert_match [r config get save] {save {}}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+
+ start_server {config "default.conf" args {--loglevel verbose --save {}}} {
+ assert_match [r config get save] {save {}}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+
+ start_server {config "default.conf" args {--proc-title-template --save --save {} --loglevel verbose}} {
+ assert_match [r config get proc-title-template] {proc-title-template --save}
+ assert_match [r config get save] {save {}}
+ assert_match [r config get loglevel] {loglevel verbose}
+ }
+
+ } {} {external:skip}
+
+ test {redis-server command line arguments - take a single bulk string with spaces for MULTI_ARG config parsing} {
+ start_server {config "default.conf" args {--shutdown-on-sigint nosave force now --shutdown-on-sigterm "nosave force"}} {
+ assert_match [r config get shutdown-on-sigint] {shutdown-on-sigint {nosave now force}}
+ assert_match [r config get shutdown-on-sigterm] {shutdown-on-sigterm {nosave force}}
+ }
+ } {} {external:skip}
+
+ # The config file at this point is in a weird state, and includes all
+ # known keywords. Might be a good idea to avoid adding tests here.
+}
+
+start_server {tags {"introspection external:skip"} overrides {enable-protected-configs {no} enable-debug-command {no}}} {
+ test {cannot modify protected configuration - no} {
+ assert_error "ERR *protected*" {r config set dir somedir}
+ assert_error "ERR *DEBUG command not allowed*" {r DEBUG HELP}
+ } {} {needs:debug}
+}
+
+start_server {config "minimal.conf" tags {"introspection external:skip"} overrides {protected-mode {no} enable-protected-configs {local} enable-debug-command {local}}} {
+ test {cannot modify protected configuration - local} {
+ # verify that for local connection it doesn't error
+ r config set dbfilename somename
+ r DEBUG HELP
+
+ # Get a non-loopback address of this instance for this test.
+ set myaddr [get_nonloopback_addr]
+ if {$myaddr != "" && ![string match {127.*} $myaddr]} {
+ # Non-loopback client should fail
+ set r2 [get_nonloopback_client]
+ assert_error "ERR *protected*" {$r2 config set dir somedir}
+ assert_error "ERR *DEBUG command not allowed*" {$r2 DEBUG HELP}
+ }
+ } {} {needs:debug}
+}
+
+test {config during loading} {
+ start_server [list overrides [list key-load-delay 50 loading-process-events-interval-bytes 1024 rdbcompression no save "900 1"]] {
+ # create a big rdb that will take long to load. it is important
+ # for keys to be big since the server processes events only once every 2mb.
+ # 100mb of rdb with 100k keys will take more than 5 seconds to load.
+ r debug populate 100000 key 1000
+
+ restart_server 0 false false
+
+ # make sure it's still loading
+ assert_equal [s loading] 1
+
+ # verify some configs are allowed during loading
+ r config set loglevel debug
+ assert_equal [lindex [r config get loglevel] 1] debug
+
+ # verify some configs are forbidden during loading
+ assert_error {LOADING*} {r config set dir asdf}
+
+ # make sure it's still loading
+ assert_equal [s loading] 1
+
+ # no need to keep waiting for loading to complete
+ exec kill [srv 0 pid]
+ }
+} {} {external:skip}
+
+test {CONFIG REWRITE handles rename-command properly} {
+ start_server {tags {"introspection"} overrides {rename-command {flushdb badger}}} {
+ assert_error {ERR unknown command*} {r flushdb}
+
+ r config rewrite
+ restart_server 0 true false
+
+ assert_error {ERR unknown command*} {r flushdb}
+ }
+} {} {external:skip}
+
+test {CONFIG REWRITE handles alias config properly} {
+ start_server {tags {"introspection"} overrides {hash-max-listpack-entries 20 hash-max-ziplist-entries 21}} {
+ assert_equal [r config get hash-max-listpack-entries] {hash-max-listpack-entries 21}
+ assert_equal [r config get hash-max-ziplist-entries] {hash-max-ziplist-entries 21}
+ r config set hash-max-listpack-entries 100
+
+ r config rewrite
+ restart_server 0 true false
+
+ assert_equal [r config get hash-max-listpack-entries] {hash-max-listpack-entries 100}
+ }
+ # Test that the order doesn't matter
+ start_server {tags {"introspection"} overrides {hash-max-ziplist-entries 20 hash-max-listpack-entries 21}} {
+ assert_equal [r config get hash-max-listpack-entries] {hash-max-listpack-entries 21}
+ assert_equal [r config get hash-max-ziplist-entries] {hash-max-ziplist-entries 21}
+ r config set hash-max-listpack-entries 100
+
+ r config rewrite
+ restart_server 0 true false
+
+ assert_equal [r config get hash-max-listpack-entries] {hash-max-listpack-entries 100}
+ }
+} {} {external:skip}
diff --git a/tests/unit/keyspace.tcl b/tests/unit/keyspace.tcl
new file mode 100644
index 0000000..43690d0
--- /dev/null
+++ b/tests/unit/keyspace.tcl
@@ -0,0 +1,502 @@
+start_server {tags {"keyspace"}} {
+ test {DEL against a single item} {
+ r set x foo
+ assert {[r get x] eq "foo"}
+ r del x
+ r get x
+ } {}
+
+ test {Vararg DEL} {
+ r set foo1{t} a
+ r set foo2{t} b
+ r set foo3{t} c
+ list [r del foo1{t} foo2{t} foo3{t} foo4{t}] [r mget foo1{t} foo2{t} foo3{t}]
+ } {3 {{} {} {}}}
+
+ test {Untagged multi-key commands} {
+ r mset foo1 a foo2 b foo3 c
+ assert_equal {a b c {}} [r mget foo1 foo2 foo3 foo4]
+ r del foo1 foo2 foo3 foo4
+ } {3} {cluster:skip}
+
+ test {KEYS with pattern} {
+ foreach key {key_x key_y key_z foo_a foo_b foo_c} {
+ r set $key hello
+ }
+ lsort [r keys foo*]
+ } {foo_a foo_b foo_c}
+
+ test {KEYS to get all keys} {
+ lsort [r keys *]
+ } {foo_a foo_b foo_c key_x key_y key_z}
+
+ test {DBSIZE} {
+ r dbsize
+ } {6}
+
+ test {DEL all keys} {
+ foreach key [r keys *] {r del $key}
+ r dbsize
+ } {0}
+
+ test "DEL against expired key" {
+ r debug set-active-expire 0
+ r setex keyExpire 1 valExpire
+ after 1100
+ assert_equal 0 [r del keyExpire]
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ test {EXISTS} {
+ set res {}
+ r set newkey test
+ append res [r exists newkey]
+ r del newkey
+ append res [r exists newkey]
+ } {10}
+
+ test {Zero length value in key. SET/GET/EXISTS} {
+ r set emptykey {}
+ set res [r get emptykey]
+ append res [r exists emptykey]
+ r del emptykey
+ append res [r exists emptykey]
+ } {10}
+
+ test {Commands pipelining} {
+ set fd [r channel]
+ puts -nonewline $fd "SET k1 xyzk\r\nGET k1\r\nPING\r\n"
+ flush $fd
+ set res {}
+ append res [string match OK* [r read]]
+ append res [r read]
+ append res [string match PONG* [r read]]
+ format $res
+ } {1xyzk1}
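+
+ # A sketch of the same pipeline using RESP array framing instead of the
+ # inline protocol shown above (both forms can be mixed on one connection).
+ test {Commands pipelining (RESP framing)} {
+ set fd [r channel]
+ puts -nonewline $fd "*3\r\n\$3\r\nSET\r\n\$2\r\nk2\r\n\$4\r\nxyzk\r\n*2\r\n\$3\r\nGET\r\n\$2\r\nk2\r\n*1\r\n\$4\r\nPING\r\n"
+ flush $fd
+ set res {}
+ append res [string match OK* [r read]]
+ append res [r read]
+ append res [string match PONG* [r read]]
+ format $res
+ } {1xyzk1}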
+
+ test {Non existing command} {
+ catch {r foobaredcommand} err
+ string match ERR* $err
+ } {1}
+
+ test {RENAME basic usage} {
+ r set mykey{t} hello
+ r rename mykey{t} mykey1{t}
+ r rename mykey1{t} mykey2{t}
+ r get mykey2{t}
+ } {hello}
+
+ test {RENAME source key should no longer exist} {
+ r exists mykey
+ } {0}
+
+ test {RENAME against already existing key} {
+ r set mykey{t} a
+ r set mykey2{t} b
+ r rename mykey2{t} mykey{t}
+ set res [r get mykey{t}]
+ append res [r exists mykey2{t}]
+ } {b0}
+
+ test {RENAMENX basic usage} {
+ r del mykey{t}
+ r del mykey2{t}
+ r set mykey{t} foobar
+ r renamenx mykey{t} mykey2{t}
+ set res [r get mykey2{t}]
+ append res [r exists mykey{t}]
+ } {foobar0}
+
+ test {RENAMENX against already existing key} {
+ r set mykey{t} foo
+ r set mykey2{t} bar
+ r renamenx mykey{t} mykey2{t}
+ } {0}
+
+ test {RENAMENX against already existing key (2)} {
+ set res [r get mykey{t}]
+ append res [r get mykey2{t}]
+ } {foobar}
+
+ test {RENAME against non existing source key} {
+ catch {r rename nokey{t} foobar{t}} err
+ format $err
+ } {ERR*}
+
+ test {RENAME where source and dest key are the same (existing)} {
+ r set mykey foo
+ r rename mykey mykey
+ } {OK}
+
+ test {RENAMENX where source and dest key are the same (existing)} {
+ r set mykey foo
+ r renamenx mykey mykey
+ } {0}
+
+ test {RENAME where source and dest key are the same (non existing)} {
+ r del mykey
+ catch {r rename mykey mykey} err
+ format $err
+ } {ERR*}
+
+ test {RENAME with volatile key, should move the TTL as well} {
+ r del mykey{t} mykey2{t}
+ r set mykey{t} foo
+ r expire mykey{t} 100
+ assert {[r ttl mykey{t}] > 95 && [r ttl mykey{t}] <= 100}
+ r rename mykey{t} mykey2{t}
+ assert {[r ttl mykey2{t}] > 95 && [r ttl mykey2{t}] <= 100}
+ }
+
+ test {RENAME with volatile key, should not inherit TTL of target key} {
+ r del mykey{t} mykey2{t}
+ r set mykey{t} foo
+ r set mykey2{t} bar
+ r expire mykey2{t} 100
+ assert {[r ttl mykey{t}] == -1 && [r ttl mykey2{t}] > 0}
+ r rename mykey{t} mykey2{t}
+ r ttl mykey2{t}
+ } {-1}
+
+ test {DEL all keys again (DB 9)} {
+ foreach key [r keys *] {
+ r del $key
+ }
+ r dbsize
+ } {0}
+
+ test {DEL all keys again (DB 10)} {
+ r select 10
+ foreach key [r keys *] {
+ r del $key
+ }
+ set res [r dbsize]
+ r select 9
+ format $res
+ } {0} {singledb:skip}
+
+ test {COPY basic usage for string} {
+ r set mykey{t} foobar
+ set res {}
+ r copy mykey{t} mynewkey{t}
+ lappend res [r get mynewkey{t}]
+ lappend res [r dbsize]
+ if {$::singledb} {
+ assert_equal [list foobar 2] [format $res]
+ } else {
+ r copy mykey{t} mynewkey{t} DB 10
+ r select 10
+ lappend res [r get mynewkey{t}]
+ lappend res [r dbsize]
+ r select 9
+ assert_equal [list foobar 2 foobar 1] [format $res]
+ }
+ }
+
+ test {COPY for string does not replace an existing key without REPLACE option} {
+ r set mykey2{t} hello
+ catch {r copy mykey2{t} mynewkey{t} DB 10} e
+ set e
+ } {0} {singledb:skip}
+
+ test {COPY for string can replace an existing key with REPLACE option} {
+ r copy mykey2{t} mynewkey{t} DB 10 REPLACE
+ r select 10
+ r get mynewkey{t}
+ } {hello} {singledb:skip}
+
+ test {COPY for string ensures that copied data is independent of the source} {
+ r flushdb
+ r select 9
+ r set mykey{t} foobar
+ set res {}
+ r copy mykey{t} mynewkey{t} DB 10
+ r select 10
+ lappend res [r get mynewkey{t}]
+ r set mynewkey{t} hoge
+ lappend res [r get mynewkey{t}]
+ r select 9
+ lappend res [r get mykey{t}]
+ r select 10
+ r flushdb
+ r select 9
+ format $res
+ } [list foobar hoge foobar] {singledb:skip}
+
+ test {COPY for string does not copy data to non-integer DB} {
+ r set mykey{t} foobar
+ catch {r copy mykey{t} mynewkey{t} DB notanumber} e
+ set e
+ } {ERR value is not an integer or out of range}
+
+ test {COPY can copy key expire metadata as well} {
+ r set mykey{t} foobar ex 100
+ r copy mykey{t} mynewkey{t} REPLACE
+ assert {[r ttl mynewkey{t}] > 0 && [r ttl mynewkey{t}] <= 100}
+ assert {[r get mynewkey{t}] eq "foobar"}
+ }
+
+ test {COPY does not create an expire if it does not exist} {
+ r set mykey{t} foobar
+ assert {[r ttl mykey{t}] == -1}
+ r copy mykey{t} mynewkey{t} REPLACE
+ assert {[r ttl mynewkey{t}] == -1}
+ assert {[r get mynewkey{t}] eq "foobar"}
+ }
+
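+ # A sketch (the exact error text is an assumption): copying a key onto
+ # itself in the same DB is rejected rather than treated as a no-op.
+ test {COPY rejects the same source and destination key} {
+ r set mykey{t} foobar
+ assert_error "*are the same*" {r copy mykey{t} mykey{t}}
+ }
+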
+source "tests/unit/type/list-common.tcl"
+foreach {type large} [array get largevalue] {
+ set origin_config [config_get_set list-max-listpack-size -1]
+ test "COPY basic usage for list - $type" {
+ r del mylist{t} mynewlist{t}
+ r lpush mylist{t} a b $large c d
+ assert_encoding $type mylist{t}
+ r copy mylist{t} mynewlist{t}
+ assert_encoding $type mynewlist{t}
+ set digest [debug_digest_value mylist{t}]
+ assert_equal $digest [debug_digest_value mynewlist{t}]
+ assert_refcount 1 mylist{t}
+ assert_refcount 1 mynewlist{t}
+ r del mylist{t}
+ assert_equal $digest [debug_digest_value mynewlist{t}]
+ }
+ config_set list-max-listpack-size $origin_config
+}
+
+ foreach type {intset listpack hashtable} {
+ test "COPY basic usage for $type set" {
+ r del set1{t} newset1{t}
+ r sadd set1{t} 1 2 3
+ if {$type ne "intset"} {
+ r sadd set1{t} a
+ }
+ if {$type eq "hashtable"} {
+ for {set i 4} {$i < 200} {incr i} {
+ r sadd set1{t} $i
+ }
+ }
+ assert_encoding $type set1{t}
+ r copy set1{t} newset1{t}
+ set digest [debug_digest_value set1{t}]
+ assert_equal $digest [debug_digest_value newset1{t}]
+ assert_refcount 1 set1{t}
+ assert_refcount 1 newset1{t}
+ r del set1{t}
+ assert_equal $digest [debug_digest_value newset1{t}]
+ }
+ }
+
+ test {COPY basic usage for listpack sorted set} {
+ r del zset1{t} newzset1{t}
+ r zadd zset1{t} 123 foobar
+ assert_encoding listpack zset1{t}
+ r copy zset1{t} newzset1{t}
+ set digest [debug_digest_value zset1{t}]
+ assert_equal $digest [debug_digest_value newzset1{t}]
+ assert_refcount 1 zset1{t}
+ assert_refcount 1 newzset1{t}
+ r del zset1{t}
+ assert_equal $digest [debug_digest_value newzset1{t}]
+ }
+
+ test {COPY basic usage for skiplist sorted set} {
+ r del zset2{t} newzset2{t}
+ set original_max [lindex [r config get zset-max-ziplist-entries] 1]
+ r config set zset-max-ziplist-entries 0
+ for {set j 0} {$j < 130} {incr j} {
+ r zadd zset2{t} [randomInt 50] ele-[randomInt 10]
+ }
+ assert_encoding skiplist zset2{t}
+ r copy zset2{t} newzset2{t}
+ set digest [debug_digest_value zset2{t}]
+ assert_equal $digest [debug_digest_value newzset2{t}]
+ assert_refcount 1 zset2{t}
+ assert_refcount 1 newzset2{t}
+ r del zset2{t}
+ assert_equal $digest [debug_digest_value newzset2{t}]
+ r config set zset-max-ziplist-entries $original_max
+ }
+
+ test {COPY basic usage for listpack hash} {
+ r del hash1{t} newhash1{t}
+ r hset hash1{t} tmp 17179869184
+ assert_encoding listpack hash1{t}
+ r copy hash1{t} newhash1{t}
+ set digest [debug_digest_value hash1{t}]
+ assert_equal $digest [debug_digest_value newhash1{t}]
+ assert_refcount 1 hash1{t}
+ assert_refcount 1 newhash1{t}
+ r del hash1{t}
+ assert_equal $digest [debug_digest_value newhash1{t}]
+ }
+
+ test {COPY basic usage for hashtable hash} {
+ r del hash2{t} newhash2{t}
+ set original_max [lindex [r config get hash-max-ziplist-entries] 1]
+ r config set hash-max-ziplist-entries 0
+ for {set i 0} {$i < 64} {incr i} {
+ r hset hash2{t} [randomValue] [randomValue]
+ }
+ assert_encoding hashtable hash2{t}
+ r copy hash2{t} newhash2{t}
+ set digest [debug_digest_value hash2{t}]
+ assert_equal $digest [debug_digest_value newhash2{t}]
+ assert_refcount 1 hash2{t}
+ assert_refcount 1 newhash2{t}
+ r del hash2{t}
+ assert_equal $digest [debug_digest_value newhash2{t}]
+ r config set hash-max-ziplist-entries $original_max
+ }
+
+ test {COPY basic usage for stream} {
+ r del mystream{t} mynewstream{t}
+ for {set i 0} {$i < 1000} {incr i} {
+ r XADD mystream{t} * item 2 value b
+ }
+ r copy mystream{t} mynewstream{t}
+ set digest [debug_digest_value mystream{t}]
+ assert_equal $digest [debug_digest_value mynewstream{t}]
+ assert_refcount 1 mystream{t}
+ assert_refcount 1 mynewstream{t}
+ r del mystream{t}
+ assert_equal $digest [debug_digest_value mynewstream{t}]
+ }
+
+ test {COPY basic usage for stream-cgroups} {
+ r del x{t}
+ r XADD x{t} 100 a 1
+ set id [r XADD x{t} 101 b 1]
+ r XADD x{t} 102 c 1
+ r XADD x{t} 103 e 1
+ r XADD x{t} 104 f 1
+ r XADD x{t} 105 g 1
+ r XGROUP CREATE x{t} g1 0
+ r XGROUP CREATE x{t} g2 0
+ r XREADGROUP GROUP g1 Alice COUNT 1 STREAMS x{t} >
+ r XREADGROUP GROUP g1 Bob COUNT 1 STREAMS x{t} >
+ r XREADGROUP GROUP g1 Bob NOACK COUNT 1 STREAMS x{t} >
+ r XREADGROUP GROUP g2 Charlie COUNT 4 STREAMS x{t} >
+ r XGROUP SETID x{t} g1 $id
+ r XREADGROUP GROUP g1 Dave COUNT 3 STREAMS x{t} >
+ r XDEL x{t} 103
+
+ r copy x{t} newx{t}
+ set info [r xinfo stream x{t} full]
+ assert_equal $info [r xinfo stream newx{t} full]
+ assert_refcount 1 x{t}
+ assert_refcount 1 newx{t}
+ r del x{t}
+ assert_equal $info [r xinfo stream newx{t} full]
+ r flushdb
+ }
+
+ test {MOVE basic usage} {
+ r set mykey foobar
+ r move mykey 10
+ set res {}
+ lappend res [r exists mykey]
+ lappend res [r dbsize]
+ r select 10
+ lappend res [r get mykey]
+ lappend res [r dbsize]
+ r select 9
+ format $res
+ } [list 0 0 foobar 1] {singledb:skip}
+
+ test {MOVE against key existing in the target DB} {
+ r set mykey hello
+ r move mykey 10
+ } {0} {singledb:skip}
+
+ test {MOVE against non-integer DB (#1428)} {
+ r set mykey hello
+ catch {r move mykey notanumber} e
+ set e
+ } {ERR value is not an integer or out of range} {singledb:skip}
+
+ test {MOVE can move key expire metadata as well} {
+ r select 10
+ r flushdb
+ r select 9
+ r set mykey foo ex 100
+ r move mykey 10
+ assert {[r ttl mykey] == -2}
+ r select 10
+ assert {[r ttl mykey] > 0 && [r ttl mykey] <= 100}
+ assert {[r get mykey] eq "foo"}
+ r select 9
+ } {OK} {singledb:skip}
+
+ test {MOVE does not create an expire if it does not exist} {
+ r select 10
+ r flushdb
+ r select 9
+ r set mykey foo
+ r move mykey 10
+ assert {[r ttl mykey] == -2}
+ r select 10
+ assert {[r ttl mykey] == -1}
+ assert {[r get mykey] eq "foo"}
+ r select 9
+ } {OK} {singledb:skip}
+
+ test {SET/GET keys in different DBs} {
+ r set a hello
+ r set b world
+ r select 10
+ r set a foo
+ r set b bared
+ r select 9
+ set res {}
+ lappend res [r get a]
+ lappend res [r get b]
+ r select 10
+ lappend res [r get a]
+ lappend res [r get b]
+ r select 9
+ format $res
+ } {hello world foo bared} {singledb:skip}
+
+ test {RANDOMKEY} {
+ r flushdb
+ r set foo x
+ r set bar y
+ set foo_seen 0
+ set bar_seen 0
+ for {set i 0} {$i < 100} {incr i} {
+ set rkey [r randomkey]
+ if {$rkey eq {foo}} {
+ set foo_seen 1
+ }
+ if {$rkey eq {bar}} {
+ set bar_seen 1
+ }
+ }
+ list $foo_seen $bar_seen
+ } {1 1}
+
+ test {RANDOMKEY against empty DB} {
+ r flushdb
+ r randomkey
+ } {}
+
+ test {RANDOMKEY regression 1} {
+ r flushdb
+ r set x 10
+ r del x
+ r randomkey
+ } {}
+
+ test {KEYS * two times with long key, Github issue #1208} {
+ r flushdb
+ r set dlskeriewrioeuwqoirueioqwrueoqwrueqw test
+ r keys *
+ r keys *
+ } {dlskeriewrioeuwqoirueioqwrueoqwrueqw}
+
+ test {Regression for pattern matching long nested loops} {
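+ # Each "a*" in the pattern is a backtracking point; a naive glob matcher
+ # does exponential work on ~20 nested wildcards, which is the regression
+ # this guards against.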
+ r flushdb
+ r SET aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 1
+ r KEYS "a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*a*b"
+ } {}
+}
diff --git a/tests/unit/latency-monitor.tcl b/tests/unit/latency-monitor.tcl
new file mode 100644
index 0000000..499fece
--- /dev/null
+++ b/tests/unit/latency-monitor.tcl
@@ -0,0 +1,166 @@
+start_server {tags {"latency-monitor needs:latency"}} {
+ # Set a threshold high enough to avoid spurious latency events.
+ r config set latency-monitor-threshold 200
+ r latency reset
+
+ test {LATENCY HISTOGRAM with empty histogram} {
+ r config resetstat
+ set histo [dict create {*}[r latency histogram]]
+ # Config resetstat is recorded
+ assert_equal [dict size $histo] 1
+ assert_match {*config|resetstat*} $histo
+ }
+
+ test {LATENCY HISTOGRAM all commands} {
+ r config resetstat
+ r set a b
+ r set c d
+ set histo [dict create {*}[r latency histogram]]
+ assert_match {calls 2 histogram_usec *} [dict get $histo set]
+ assert_match {calls 1 histogram_usec *} [dict get $histo "config|resetstat"]
+ }
+
+ test {LATENCY HISTOGRAM sub commands} {
+ r config resetstat
+ r client id
+ r client list
+ # The parent command replies with its subcommands
+ set histo [dict create {*}[r latency histogram client]]
+ assert {[dict size $histo] == 2}
+ assert_match {calls 1 histogram_usec *} [dict get $histo "client|id"]
+ assert_match {calls 1 histogram_usec *} [dict get $histo "client|list"]
+
+ # explicitly ask for one sub-command
+ set histo [dict create {*}[r latency histogram "client|id"]]
+ assert {[dict size $histo] == 1}
+ assert_match {calls 1 histogram_usec *} [dict get $histo "client|id"]
+ }
+
+ test {LATENCY HISTOGRAM with a subset of commands} {
+ r config resetstat
+ r set a b
+ r set c d
+ r get a
+ r hset f k v
+ r hgetall f
+ set histo [dict create {*}[r latency histogram set hset]]
+ assert_match {calls 2 histogram_usec *} [dict get $histo set]
+ assert_match {calls 1 histogram_usec *} [dict get $histo hset]
+ assert_equal [dict size $histo] 2
+ set histo [dict create {*}[r latency histogram hgetall get zadd]]
+ assert_match {calls 1 histogram_usec *} [dict get $histo hgetall]
+ assert_match {calls 1 histogram_usec *} [dict get $histo get]
+ assert_equal [dict size $histo] 2
+ }
+
+ test {LATENCY HISTOGRAM command} {
+ r config resetstat
+ r set a b
+ r get a
+ assert {[llength [r latency histogram set get]] == 4}
+ }
+
+ test {LATENCY HISTOGRAM with wrong command name skips the invalid one} {
+ r config resetstat
+ assert {[llength [r latency histogram blabla]] == 0}
+ assert {[llength [r latency histogram blabla blabla2 set get]] == 0}
+ r set a b
+ r get a
+ assert_match {calls 1 histogram_usec *} [lindex [r latency histogram blabla blabla2 set get] 1]
+ assert_match {calls 1 histogram_usec *} [lindex [r latency histogram blabla blabla2 set get] 3]
+ assert {[string length [r latency histogram blabla set get]] > 0}
+ }
+
+tags {"needs:debug"} {
+ test {Test latency events logging} {
+ r debug sleep 0.3
+ after 1100
+ r debug sleep 0.4
+ after 1100
+ r debug sleep 0.5
+ assert {[r latency history command] >= 3}
+ }
+
+ test {LATENCY HISTORY output is ok} {
+ set min 250
+ set max 450
+ foreach event [r latency history command] {
+ lassign $event time latency
+ if {!$::no_latency} {
+ assert {$latency >= $min && $latency <= $max}
+ }
+ incr min 100
+ incr max 100
+ set last_time $time ; # Used in the next test
+ }
+ }
+
+ test {LATENCY LATEST output is ok} {
+ foreach event [r latency latest] {
+ lassign $event eventname time latency max
+ assert {$eventname eq "command"}
+ if {!$::no_latency} {
+ assert {$max >= 450 && $max <= 650}
+ assert {$time == $last_time}
+ }
+ break
+ }
+ }
+
+ test {LATENCY GRAPH can output the event graph} {
+ set res [r latency graph command]
+ assert_match {*command*high*low*} $res
+
+ # These numbers are taken from the "Test latency events logging" test
+ # above: (debug sleep 0.3) and (debug sleep 0.5). Ranges are used to
+ # avoid timing issues.
+ regexp "command - high (.*?) ms, low (.*?) ms" $res -> high low
+ assert_morethan_equal $high 500
+ assert_morethan_equal $low 300
+ }
+} ;# tag
+
+ test {LATENCY of expire events is correctly collected} {
+ r config set latency-monitor-threshold 20
+ r flushdb
+ if {$::valgrind} {set count 100000} else {set count 1000000}
+ r eval {
+ local i = 0
+ while (i < tonumber(ARGV[1])) do
+ redis.call('sadd',KEYS[1],i)
+ i = i+1
+ end
+ } 1 mybigkey $count
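+ # The set is built server-side in Lua so we don't ship a huge multi-bulk
+ # request; expiring (deleting) such a big key is what should cross the
+ # 20 ms latency threshold set above.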
+ r pexpire mybigkey 50
+ wait_for_condition 5 100 {
+ [r dbsize] == 0
+ } else {
+ fail "key wasn't expired"
+ }
+ assert_match {*expire-cycle*} [r latency latest]
+
+ test {LATENCY GRAPH can output the expire event graph} {
+ assert_match {*expire-cycle*high*low*} [r latency graph expire-cycle]
+ }
+
+ r config set latency-monitor-threshold 200
+ }
+
+ test {LATENCY HISTORY / RESET with wrong event name is fine} {
+ assert {[llength [r latency history blabla]] == 0}
+ assert {[r latency reset blabla] == 0}
+ }
+
+ test {LATENCY DOCTOR produces some output} {
+ assert {[string length [r latency doctor]] > 0}
+ }
+
+ test {LATENCY RESET is able to reset events} {
+ assert {[r latency reset] > 0}
+ assert {[r latency latest] eq {}}
+ }
+
+ test {LATENCY HELP should not have unexpected options} {
+ catch {r LATENCY help xxx} e
+ assert_match "*wrong number of arguments for 'latency|help' command" $e
+ }
+}
diff --git a/tests/unit/lazyfree.tcl b/tests/unit/lazyfree.tcl
new file mode 100644
index 0000000..17f4600
--- /dev/null
+++ b/tests/unit/lazyfree.tcl
@@ -0,0 +1,90 @@
+start_server {tags {"lazyfree"}} {
+ test "UNLINK can reclaim memory in background" {
+ set orig_mem [s used_memory]
+ set args {}
+ for {set i 0} {$i < 100000} {incr i} {
+ lappend args $i
+ }
+ r sadd myset {*}$args
+ assert {[r scard myset] == 100000}
+ set peak_mem [s used_memory]
+ assert {[r unlink myset] == 1}
+ assert {$peak_mem > $orig_mem+1000000}
+ wait_for_condition 50 100 {
+ [s used_memory] < $peak_mem &&
+ [s used_memory] < $orig_mem*2
+ } else {
+ fail "Memory is not reclaimed by UNLINK"
+ }
+ }
+
+ test "FLUSHDB ASYNC can reclaim memory in background" {
+ # make sure the previous test is really done before sampling used_memory
+ wait_lazyfree_done r
+
+ set orig_mem [s used_memory]
+ set args {}
+ for {set i 0} {$i < 100000} {incr i} {
+ lappend args $i
+ }
+ r sadd myset {*}$args
+ assert {[r scard myset] == 100000}
+ set peak_mem [s used_memory]
+ r flushdb async
+ assert {$peak_mem > $orig_mem+1000000}
+ wait_for_condition 50 100 {
+ [s used_memory] < $peak_mem &&
+ [s used_memory] < $orig_mem*2
+ } else {
+ fail "Memory is not reclaimed by FLUSHDB ASYNC"
+ }
+ }
+
+ test "lazy free a stream with all types of metadata" {
+ # make sure the previous test is really done before doing RESETSTAT
+ wait_for_condition 50 100 {
+ [s lazyfree_pending_objects] == 0
+ } else {
+ fail "lazyfree isn't done"
+ }
+
+ r config resetstat
+ r config set stream-node-max-entries 5
+ for {set j 0} {$j < 1000} {incr j} {
+ if {rand() < 0.9} {
+ r xadd stream * foo $j
+ } else {
+ r xadd stream * bar $j
+ }
+ }
+ r xgroup create stream mygroup 0
+ set records [r xreadgroup GROUP mygroup Alice COUNT 2 STREAMS stream >]
+ r xdel stream [lindex [lindex [lindex [lindex $records 0] 1] 1] 0]
+ r xack stream mygroup [lindex [lindex [lindex [lindex $records 0] 1] 0] 0]
+ r unlink stream
+
+ # make sure it was lazy freed
+ wait_for_condition 50 100 {
+ [s lazyfree_pending_objects] == 0
+ } else {
+ fail "lazyfree isn't done"
+ }
+ assert_equal [s lazyfreed_objects] 1
+ } {} {needs:config-resetstat}
+
+ test "lazy free a stream with deleted cgroup" {
+ r config resetstat
+ r xadd s * a b
+ r xgroup create s bla $
+ r xgroup destroy s bla
+ r unlink s
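+ # With the only consumer group destroyed, the remaining stream is tiny,
+ # so its free effort falls below the lazyfree threshold and UNLINK frees
+ # it synchronously.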
+
+ # make sure it was not lazy freed
+ wait_for_condition 50 100 {
+ [s lazyfree_pending_objects] == 0
+ } else {
+ fail "lazyfree isn't done"
+ }
+ assert_equal [s lazyfreed_objects] 0
+ } {} {needs:config-resetstat}
+}
diff --git a/tests/unit/limits.tcl b/tests/unit/limits.tcl
new file mode 100644
index 0000000..3af1519
--- /dev/null
+++ b/tests/unit/limits.tcl
@@ -0,0 +1,21 @@
+start_server {tags {"limits network external:skip"} overrides {maxclients 10}} {
+ if {$::tls} {
+ set expected_code "*I/O error*"
+ } else {
+ set expected_code "*ERR max*reached*"
+ }
+ test {Check if maxclients works by refusing connections} {
+ set c 0
+ catch {
+ while {$c < 50} {
+ incr c
+ set rd [redis_deferring_client]
+ $rd ping
+ $rd read
+ after 100
+ }
+ } e
+ assert {$c > 8 && $c <= 10}
+ set e
+ } $expected_code
+}
diff --git a/tests/unit/maxmemory.tcl b/tests/unit/maxmemory.tcl
new file mode 100644
index 0000000..89eaf9b
--- /dev/null
+++ b/tests/unit/maxmemory.tcl
@@ -0,0 +1,590 @@
+start_server {tags {"maxmemory" "external:skip"}} {
+ r config set maxmemory 11mb
+ r config set maxmemory-policy allkeys-lru
+ set server_pid [s process_id]
+
+ proc init_test {client_eviction} {
+ r flushdb
+
+ set prev_maxmemory_clients [r config get maxmemory-clients]
+ if $client_eviction {
+ r config set maxmemory-clients 3mb
+ r client no-evict on
+ } else {
+ r config set maxmemory-clients 0
+ }
+
+ r config resetstat
+ # fill 5mb using 50 keys of 100kb
+ for {set j 0} {$j < 50} {incr j} {
+ r setrange $j 100000 x
+ }
+ assert_equal [r dbsize] 50
+ }
+
+ # Return true if the eviction occurred (client or key) based on the argument
+ proc check_eviction_test {client_eviction} {
+ set evicted_keys [s evicted_keys]
+ set evicted_clients [s evicted_clients]
+ set dbsize [r dbsize]
+
+ if $client_eviction {
+ return [expr $evicted_clients > 0 && $evicted_keys == 0 && $dbsize == 50]
+ } else {
+ return [expr $evicted_clients == 0 && $evicted_keys > 0 && $dbsize < 50]
+ }
+ }
+
+ # Assert that the eviction test passed (and print some debug info when verbose)
+ proc verify_eviction_test {client_eviction} {
+ set evicted_keys [s evicted_keys]
+ set evicted_clients [s evicted_clients]
+ set dbsize [r dbsize]
+
+ if $::verbose {
+ puts "evicted keys: $evicted_keys"
+ puts "evicted clients: $evicted_clients"
+ puts "dbsize: $dbsize"
+ }
+
+ assert [check_eviction_test $client_eviction]
+ }
+
+ foreach {client_eviction} {false true} {
+ set clients {}
+ test "eviction due to output buffers of many MGET clients, client eviction: $client_eviction" {
+ init_test $client_eviction
+
+ for {set j 0} {$j < 20} {incr j} {
+ set rr [redis_deferring_client]
+ lappend clients $rr
+ }
+
+ # Generate client output buffers via MGET until we can observe some effect on
+ # keys / client eviction, or we time out.
+ set t [clock seconds]
+ while {![check_eviction_test $client_eviction] && [expr [clock seconds] - $t] < 20} {
+ foreach rr $clients {
+ if {[catch {
+ $rr mget 1
+ $rr flush
+ } err]} {
+ lremove clients $rr
+ }
+ }
+ }
+
+ verify_eviction_test $client_eviction
+ }
+ foreach rr $clients {
+ $rr close
+ }
+
+ set clients {}
+ test "eviction due to input buffer of a dead client, client eviction: $client_eviction" {
+ init_test $client_eviction
+
+ for {set j 0} {$j < 30} {incr j} {
+ set rr [redis_deferring_client]
+ lappend clients $rr
+ }
+
+ foreach rr $clients {
+ if {[catch {
+ $rr write "*250\r\n"
+ for {set j 0} {$j < 249} {incr j} {
+ $rr write "\$1000\r\n"
+ $rr write [string repeat x 1000]
+ $rr write "\r\n"
+ $rr flush
+ }
+ }]} {
+ lremove clients $rr
+ }
+ }
+
+ verify_eviction_test $client_eviction
+ }
+ foreach rr $clients {
+ $rr close
+ }
+
+ set clients {}
+ test "eviction due to output buffers of pubsub, client eviction: $client_eviction" {
+ init_test $client_eviction
+
+ for {set j 0} {$j < 20} {incr j} {
+ set rr [redis_client]
+ lappend clients $rr
+ }
+
+ foreach rr $clients {
+ $rr subscribe bla
+ }
+
+ # Generate client output buffers via PUBLISH until we can observe some effect on
+ # keys / client eviction, or we time out.
+ set bigstr [string repeat x 100000]
+ set t [clock seconds]
+ while {![check_eviction_test $client_eviction] && [expr [clock seconds] - $t] < 20} {
+ if {[catch { r publish bla $bigstr } err]} {
+ if {$::verbose} {
+ puts "Error publishing: $err"
+ }
+ }
+ }
+
+ verify_eviction_test $client_eviction
+ }
+ foreach rr $clients {
+ $rr close
+ }
+ }
+
+}
+
+start_server {tags {"maxmemory external:skip"}} {
+ test "Without maxmemory small integers are shared" {
+ r config set maxmemory 0
+ r set a 1
+ assert_refcount_morethan a 1
+ }
+
+ test "With maxmemory and non-LRU policy integers are still shared" {
+ r config set maxmemory 1073741824
+ r config set maxmemory-policy allkeys-random
+ r set a 1
+ assert_refcount_morethan a 1
+ }
+
+ test "With maxmemory and LRU policy integers are not shared" {
+ r config set maxmemory 1073741824
+ r config set maxmemory-policy allkeys-lru
+ r set a 1
+ r config set maxmemory-policy volatile-lru
+ r set b 1
+ assert_refcount 1 a
+ assert_refcount 1 b
+ r config set maxmemory 0
+ }
+
+ foreach policy {
+ allkeys-random allkeys-lru allkeys-lfu volatile-lru volatile-lfu volatile-random volatile-ttl
+ } {
+ test "maxmemory - is the memory limit honoured? (policy $policy)" {
+ # make sure to start with a blank instance
+ r flushall
+ # Get the current memory limit and calculate a new limit.
+ # We just add 100k to the current memory size so that it is
+ # fast for us to reach that limit.
+ set used [s used_memory]
+ set limit [expr {$used+100*1024}]
+ r config set maxmemory $limit
+ r config set maxmemory-policy $policy
+ # Now add keys until the limit is almost reached.
+ set numkeys 0
+ while 1 {
+ r setex [randomKey] 10000 x
+ incr numkeys
+ if {[s used_memory]+4096 > $limit} {
+ assert {$numkeys > 10}
+ break
+ }
+ }
+ # If we again add the same number of keys we already added, we
+ # should still be under the limit.
+ for {set j 0} {$j < $numkeys} {incr j} {
+ r setex [randomKey] 10000 x
+ }
+ assert {[s used_memory] < ($limit+4096)}
+ }
+ }
+
+ foreach policy {
+ allkeys-random allkeys-lru volatile-lru volatile-random volatile-ttl
+ } {
+ test "maxmemory - only allkeys-* should remove non-volatile keys ($policy)" {
+ # make sure to start with a blank instance
+ r flushall
+ # Get the current memory limit and calculate a new limit.
+ # We just add 100k to the current memory size so that it is
+ # fast for us to reach that limit.
+ set used [s used_memory]
+ set limit [expr {$used+100*1024}]
+ r config set maxmemory $limit
+ r config set maxmemory-policy $policy
+ # Now add keys until the limit is almost reached.
+ set numkeys 0
+ while 1 {
+ r set [randomKey] x
+ incr numkeys
+ if {[s used_memory]+4096 > $limit} {
+ assert {$numkeys > 10}
+ break
+ }
+ }
+ # If we again add the same number of keys we already added and
+ # the policy is allkeys-*, we should still be under the limit.
+ # Otherwise we should see an error reported by Redis.
+ set err 0
+ for {set j 0} {$j < $numkeys} {incr j} {
+ if {[catch {r set [randomKey] x} e]} {
+ if {[string match {*used memory*} $e]} {
+ set err 1
+ }
+ }
+ }
+ if {[string match allkeys-* $policy]} {
+ assert {[s used_memory] < ($limit+4096)}
+ } else {
+ assert {$err == 1}
+ }
+ }
+ }
+
+ foreach policy {
+ volatile-lru volatile-lfu volatile-random volatile-ttl
+ } {
+ test "maxmemory - policy $policy should only remove volatile keys." {
+ # make sure to start with a blank instance
+ r flushall
+ # Get the current memory limit and calculate a new limit.
+ # We just add 100k to the current memory size so that it is
+ # fast for us to reach that limit.
+ set used [s used_memory]
+ set limit [expr {$used+100*1024}]
+ r config set maxmemory $limit
+ r config set maxmemory-policy $policy
+ # Now add keys until the limit is almost reached.
+ set numkeys 0
+ while 1 {
+ # Odd keys are volatile
+ # Even keys are non-volatile
+ if {$numkeys % 2} {
+ r setex "key:$numkeys" 10000 x
+ } else {
+ r set "key:$numkeys" x
+ }
+ if {[s used_memory]+4096 > $limit} {
+ assert {$numkeys > 10}
+ break
+ }
+ incr numkeys
+ }
+ # Now we add as many volatile keys as were already added.
+ # We expect Redis to evict only volatile keys in order to make
+ # space.
+ set err 0
+ for {set j 0} {$j < $numkeys} {incr j} {
+ catch {r setex "foo:$j" 10000 x}
+ }
+ # We should still be under the limit.
+ assert {[s used_memory] < ($limit+4096)}
+ # However, all our non-volatile keys should still be here.
+ for {set j 0} {$j < $numkeys} {incr j 2} {
+ assert {[r exists "key:$j"]}
+ }
+ }
+ }
+}
+
+# Calculate the query buffer memory of the slave
+proc slave_query_buffer {srv} {
+ set clients [split [$srv client list] "\r\n"]
+ set c [lsearch -inline $clients *flags=S*]
+ if {[string length $c] > 0} {
+ assert {[regexp {qbuf=([0-9]+)} $c - qbuf]}
+ assert {[regexp {qbuf-free=([0-9]+)} $c - qbuf_free]}
+ return [expr $qbuf + $qbuf_free]
+ }
+ return 0
+}
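+
+# For reference, a replica entry in CLIENT LIST looks roughly like this
+# (hypothetical values): id=7 addr=127.0.0.1:50321 ... flags=S ... qbuf=26 qbuf-free=20448 ...
+# The proc above sums qbuf and qbuf-free of the first such entry.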
+
+proc test_slave_buffers {test_name cmd_count payload_len limit_memory pipeline} {
+ start_server {tags {"maxmemory external:skip"}} {
+ start_server {} {
+ set slave_pid [s process_id]
+ test "$test_name" {
+ set slave [srv 0 client]
+ set slave_host [srv 0 host]
+ set slave_port [srv 0 port]
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+
+ # Disable slow log for master to avoid memory growth in slow env.
+ $master config set slowlog-log-slower-than -1
+
+ # add 100 keys of 100k (10MB total)
+ for {set j 0} {$j < 100} {incr j} {
+ $master setrange "key:$j" 100000 asdf
+ }
+
+ # make sure master doesn't disconnect slave because of timeout
+ $master config set repl-timeout 1200 ;# 20 minutes (for valgrind and slow machines)
+ $master config set maxmemory-policy allkeys-random
+ $master config set client-output-buffer-limit "replica 100000000 100000000 300"
+ $master config set repl-backlog-size [expr {10*1024}]
+
+ # disable latency tracking
+ $master config set latency-tracking no
+ $slave config set latency-tracking no
+
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ # measure used memory after the slave connected and set maxmemory
+ set orig_used [s -1 used_memory]
+ set orig_client_buf [s -1 mem_clients_normal]
+ set orig_mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
+ set orig_used_no_repl [expr {$orig_used - $orig_mem_not_counted_for_evict}]
+ set limit [expr {$orig_used - $orig_mem_not_counted_for_evict + 32*1024}]
+
+ if {$limit_memory==1} {
+ $master config set maxmemory $limit
+ }
+
+ # put the slave to sleep
+ set rd_slave [redis_deferring_client]
+ pause_process $slave_pid
+
+ # send some 10mb worth of commands that don't increase the memory usage
+ if {$pipeline == 1} {
+ set rd_master [redis_deferring_client -1]
+ for {set k 0} {$k < $cmd_count} {incr k} {
+ $rd_master setrange key:0 0 [string repeat A $payload_len]
+ }
+ for {set k 0} {$k < $cmd_count} {incr k} {
+ $rd_master read
+ }
+ } else {
+ for {set k 0} {$k < $cmd_count} {incr k} {
+ $master setrange key:0 0 [string repeat A $payload_len]
+ }
+ }
+
+ set new_used [s -1 used_memory]
+ set slave_buf [s -1 mem_clients_slaves]
+ set client_buf [s -1 mem_clients_normal]
+ set mem_not_counted_for_evict [s -1 mem_not_counted_for_evict]
+ set used_no_repl [expr {$new_used - $mem_not_counted_for_evict - [slave_query_buffer $master]}]
+ # We need to exclude the replica's reply buffer and query buffer from used memory.
+ # Removing the replica (output) buffers lets us measure any other changes to the
+ # used memory and see that they're insignificant (the test's purpose is to check that
+ # the replica buffers are counted correctly, so the used memory growth after
+ # deducting them should be nearly 0).
+ # We remove the query buffers because, on slow test platforms, they can accumulate many ACKs.
+ set delta [expr {($used_no_repl - $client_buf) - ($orig_used_no_repl - $orig_client_buf)}]
+
+ assert {[$master dbsize] == 100}
+ assert {$slave_buf > 2*1024*1024} ;# some of the data may have been pushed to the OS buffers
+ set delta_max [expr {$cmd_count / 2}] ;# 1 unaccounted byte per command: with 1M commands that would consume some 1MB
+ assert {$delta < $delta_max && $delta > -$delta_max}
+
+ $master client kill type slave
+ set info_str [$master info memory]
+ set killed_used [getInfoProperty $info_str used_memory]
+ set killed_mem_not_counted_for_evict [getInfoProperty $info_str mem_not_counted_for_evict]
+ set killed_slave_buf [s -1 mem_clients_slaves]
+ # we need to exclude the reply buffer and query buffer of the slave from used memory after killing the slave
+ set killed_used_no_repl [expr {$killed_used - $killed_mem_not_counted_for_evict - [slave_query_buffer $master]}]
+ set delta_no_repl [expr {$killed_used_no_repl - $used_no_repl}]
+ assert {[$master dbsize] == 100}
+ assert {$killed_slave_buf == 0}
+ assert {$delta_no_repl > -$delta_max && $delta_no_repl < $delta_max}
+
+ }
+ # unfreeze the slave process (after the 'test' succeeded or failed, but before we attempt to terminate the server)
+ resume_process $slave_pid
+ }
+ }
+}
+
+# test that slave buffers are counted correctly
+# we want to use many small commands, and we don't want to wait long,
+# so we need to use a pipeline (redis_deferring_client);
+# that may cause the query buffer to fill and induce eviction, so we disable eviction (limit_memory is 0)
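+# (arguments: test_name cmd_count payload_len limit_memory pipeline, matching the proc defined above)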
+test_slave_buffers {slave buffers are counted correctly} 1000000 10 0 1
+
+# test that slave buffers don't induce eviction
+# test again with fewer (and bigger) commands, without a pipeline, but with eviction
+test_slave_buffers "replica buffers don't induce eviction" 100000 100 1 0
+
+start_server {tags {"maxmemory external:skip"}} {
+ test {Don't rehash if used memory exceeds maxmemory after rehash} {
+ r config set latency-tracking no
+ r config set maxmemory 0
+ r config set maxmemory-policy allkeys-random
+
+ # The next rehash size is 8192 buckets, which will eat 64k of memory
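+ # (assuming 8-byte pointers, a new 8192-bucket table costs 8192 * 8 = 64KB)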
+ populate 4096 "" 1
+
+ set used [s used_memory]
+ set limit [expr {$used + 10*1024}]
+ r config set maxmemory $limit
+ r set k1 v1
+ # The next write command will trigger evicting some keys if the last
+ # command triggered a DB dict rehash
+ r set k2 v2
+ # There must be 4098 keys because Redis does not evict any keys.
+ r dbsize
+ } {4098}
+}
+
+start_server {tags {"maxmemory external:skip"}} {
+ test {client tracking doesn't cause an eviction feedback loop} {
+ r config set latency-tracking no
+ r config set maxmemory 0
+ r config set maxmemory-policy allkeys-lru
+ r config set maxmemory-eviction-tenacity 100
+
+ # 10 clients listening for tracking messages
+ set clients {}
+ for {set j 0} {$j < 10} {incr j} {
+ lappend clients [redis_deferring_client]
+ }
+ foreach rd $clients {
+ $rd HELLO 3
+ $rd read ; # Consume the HELLO reply
+ $rd CLIENT TRACKING on
+ $rd read ; # Consume the CLIENT reply
+ }
+
+ # populate 300 keys, with long key names and short values
+ for {set j 0} {$j < 300} {incr j} {
+ set key $j[string repeat x 1000]
+ r set $key x
+
+ # have each client cache this key
+ foreach rd $clients {
+ $rd get $key
+ $rd read
+ }
+ }
+
+ # we need to wait one second for the client querybuf excess memory to be
+ # trimmed by cron, otherwise the INFO used_memory and CONFIG maxmemory
+ # below (on slow machines) won't be "atomic" and won't trigger eviction.
+ after 1100
+
+ # set the memory limit, which will cause a few keys to be evicted.
+ # we need to make sure to evict key names with a total size of more than
+ # 16kb (PROTO_REPLY_CHUNK_BYTES); only after that do the invalidation
+ # messages have a chance to trigger further eviction.
+ set used [s used_memory]
+ set limit [expr {$used - 40000}]
+ r config set maxmemory $limit
+
+ # make sure some eviction happened
+ set evicted [s evicted_keys]
+ if {$::verbose} { puts "evicted: $evicted" }
+
+ # make sure we didn't drain the database
+ assert_range [r dbsize] 200 300
+
+ assert_range $evicted 10 50
+ foreach rd $clients {
+ $rd read ;# make sure we have some invalidation message waiting
+ $rd close
+ }
+
+ # eviction continues (known problem described in #8069)
+ # for now this test only makes sure the eviction loop itself doesn't
+ # have a feedback loop
+ set evicted [s evicted_keys]
+ if {$::verbose} { puts "evicted: $evicted" }
+ }
+}
+
+start_server {tags {"maxmemory" "external:skip"}} {
+ test {propagation with eviction} {
+ set repl [attach_to_replication_stream]
+
+ r set asdf1 1
+ r set asdf2 2
+ r set asdf3 3
+
+ r config set maxmemory-policy allkeys-lru
+ r config set maxmemory 1
+
+ wait_for_condition 5000 10 {
+ [r dbsize] eq 0
+ } else {
+ fail "Not all keys have been evicted"
+ }
+
+ r config set maxmemory 0
+ r config set maxmemory-policy noeviction
+
+ r set asdf4 4
+
+ assert_replication_stream $repl {
+ {select *}
+ {set asdf1 1}
+ {set asdf2 2}
+ {set asdf3 3}
+ {del asdf*}
+ {del asdf*}
+ {del asdf*}
+ {set asdf4 4}
+ }
+ close_replication_stream $repl
+
+ r config set maxmemory 0
+ r config set maxmemory-policy noeviction
+ }
+}
+
+start_server {tags {"maxmemory" "external:skip"}} {
+ test {propagation with eviction in MULTI} {
+ set repl [attach_to_replication_stream]
+
+ r config set maxmemory-policy allkeys-lru
+
+ r multi
+ r incr x
+ r config set maxmemory 1
+ r incr x
+ assert_equal [r exec] {1 OK 2}
+
+ wait_for_condition 5000 10 {
+ [r dbsize] eq 0
+ } else {
+ fail "Not all keys have been evicted"
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr x}
+ {incr x}
+ {exec}
+ {del x}
+ }
+ close_replication_stream $repl
+
+ r config set maxmemory 0
+ r config set maxmemory-policy noeviction
+ }
+}
+
+start_server {tags {"maxmemory" "external:skip"}} {
+ test {lru/lfu value of the key just added} {
+ r config set maxmemory-policy allkeys-lru
+ r set foo a
+ assert {[r object idletime foo] <= 2}
+ r del foo
+ r set foo 1
+ r get foo
+ assert {[r object idletime foo] <= 2}
+
+ r config set maxmemory-policy allkeys-lfu
+ r del foo
+ r set foo a
+ assert {[r object freq foo] == 5}
+ }
+}
diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl
new file mode 100644
index 0000000..d821c7a
--- /dev/null
+++ b/tests/unit/memefficiency.tcl
@@ -0,0 +1,580 @@
+proc test_memory_efficiency {range} {
+ r flushall
+ set rd [redis_deferring_client]
+ set base_mem [s used_memory]
+ set written 0
+ for {set j 0} {$j < 10000} {incr j} {
+ set key key:$j
+ set val [string repeat A [expr {int(rand()*$range)}]]
+ $rd set $key $val
+ incr written [string length $key]
+ incr written [string length $val]
+ incr written 2 ;# two separator bytes are the minimum overhead to store key-value data
+ }
+ for {set j 0} {$j < 10000} {incr j} {
+ $rd read ; # Discard replies
+ }
+
+ set current_mem [s used_memory]
+ set used [expr {$current_mem-$base_mem}]
+ set efficiency [expr {double($written)/$used}]
+ return $efficiency
+}
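+
+# For example (hypothetical numbers): writing 1MB of key+value payload while
+# used_memory grows by 2MB yields an efficiency of 0.5.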
+
+start_server {tags {"memefficiency external:skip"}} {
+ foreach {size_range expected_min_efficiency} {
+ 32 0.15
+ 64 0.25
+ 128 0.35
+ 1024 0.75
+ 16384 0.82
+ } {
+ test "Memory efficiency with values in range $size_range" {
+ set efficiency [test_memory_efficiency $size_range]
+ assert {$efficiency >= $expected_min_efficiency}
+ }
+ }
+}
+
+run_solo {defrag} {
+start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-rewrite-percentage 0 save ""}} {
+ if {[string match {*jemalloc*} [s mem_allocator]] && [r debug mallctl arenas.page] <= 8192} {
+ test "Active defrag" {
+ r config set hz 100
+ r config set activedefrag no
+ r config set active-defrag-threshold-lower 5
+ r config set active-defrag-cycle-min 65
+ r config set active-defrag-cycle-max 75
+ r config set active-defrag-ignore-bytes 2mb
+ r config set maxmemory 100mb
+ r config set maxmemory-policy allkeys-lru
+
+ populate 700000 asdf1 150
+ populate 170000 asdf2 300
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ if {$::verbose} {
+ puts "frag $frag"
+ }
+ assert {$frag >= 1.4}
+
+ r config set latency-monitor-threshold 5
+ r latency reset
+ r config set maxmemory 110mb ;# prevent further eviction (so the digest test below doesn't fail)
+ set digest [debug_digest]
+ catch {r config set activedefrag yes} e
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+ # Wait for the active defrag to start working (decision once a
+ # second).
+ wait_for_condition 50 100 {
+ [s active_defrag_running] ne 0
+ } else {
+ fail "defrag not started."
+ }
+
+ # Wait for the active defrag to stop working.
+ wait_for_condition 2000 100 {
+ [s active_defrag_running] eq 0
+ } else {
+ after 120 ;# serverCron only updates the info once in 100ms
+ puts [r info memory]
+ puts [r memory malloc-stats]
+ fail "defrag didn't stop."
+ }
+
+ # Test the fragmentation is lower.
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ set max_latency 0
+ foreach event [r latency latest] {
+ lassign $event eventname time latency max
+ if {$eventname == "active-defrag-cycle"} {
+ set max_latency $max
+ }
+ }
+ if {$::verbose} {
+ puts "frag $frag"
+ set misses [s active_defrag_misses]
+ set hits [s active_defrag_hits]
+ puts "hits: $hits"
+ puts "misses: $misses"
+ puts "max latency $max_latency"
+ puts [r latency latest]
+ puts [r latency history active-defrag-cycle]
+ }
+ assert {$frag < 1.1}
+ # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
+ # we expect max latency to be not much higher than 7.5ms, but due to rare slowness the threshold is set higher
+ if {!$::no_latency} {
+ assert {$max_latency <= 30}
+ }
+ }
+ # verify the data isn't corrupted or changed
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ r save ;# saving an rdb iterates over all the data / pointers
+
+ # if defrag is supported, test AOF loading too
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+ test "Active defrag - AOF loading" {
+ # reset stats and load the AOF file
+ r config resetstat
+ r config set key-load-delay -25 ;# sleep on average 1/25 usec
+ r debug loadaof
+ r config set activedefrag no
+ # measure hits and misses right after aof loading
+ set misses [s active_defrag_misses]
+ set hits [s active_defrag_hits]
+
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ set max_latency 0
+ foreach event [r latency latest] {
+ lassign $event eventname time latency max
+ if {$eventname == "while-blocked-cron"} {
+ set max_latency $max
+ }
+ }
+ if {$::verbose} {
+ puts "AOF loading:"
+ puts "frag $frag"
+ puts "hits: $hits"
+ puts "misses: $misses"
+ puts "max latency $max_latency"
+ puts [r latency latest]
+ puts [r latency history "while-blocked-cron"]
+ }
+ # make sure we had defrag hits during AOF loading
+ assert {$hits > 100000}
+ # make sure the defragger did enough work to keep the fragmentation low during loading.
+ # we cannot check that it went all the way down, since we don't wait for a full defrag cycle to complete.
+ assert {$frag < 1.4}
+ # since the AOF contains simple (fast) SET commands (and the cron during loading runs every 1024 commands),
+ # it'll still not block the loading for long periods of time.
+ if {!$::no_latency} {
+ assert {$max_latency <= 40}
+ }
+ }
+ } ;# Active defrag - AOF loading
+ }
+ r config set appendonly no
+ r config set key-load-delay 0
+
+ test "Active defrag eval scripts" {
+ r flushdb
+ r script flush sync
+ r config resetstat
+ r config set hz 100
+ r config set activedefrag no
+ r config set active-defrag-threshold-lower 5
+ r config set active-defrag-cycle-min 65
+ r config set active-defrag-cycle-max 75
+ r config set active-defrag-ignore-bytes 1500kb
+ r config set maxmemory 0
+
+ set n 50000
+
+ # Populate memory with an interleaving script-key pattern of the same size
+ set dummy_script "--[string repeat x 400]\nreturn "
+ set rd [redis_deferring_client]
+ for {set j 0} {$j < $n} {incr j} {
+ set val "$dummy_script[format "%06d" $j]"
+ $rd script load $val
+ $rd set k$j $val
+ }
+ for {set j 0} {$j < $n} {incr j} {
+ $rd read ; # Discard script load replies
+ $rd read ; # Discard set replies
+ }
+ after 120 ;# serverCron only updates the info once in 100ms
+ if {$::verbose} {
+ puts "used [s allocator_allocated]"
+ puts "rss [s allocator_active]"
+ puts "frag [s allocator_frag_ratio]"
+ puts "frag_bytes [s allocator_frag_bytes]"
+ }
+ assert_lessthan [s allocator_frag_ratio] 1.05
+
+ # Delete all the keys to create fragmentation
+ for {set j 0} {$j < $n} {incr j} { $rd del k$j }
+ for {set j 0} {$j < $n} {incr j} { $rd read } ; # Discard del replies
+ $rd close
+ after 120 ;# serverCron only updates the info once in 100ms
+ if {$::verbose} {
+ puts "used [s allocator_allocated]"
+ puts "rss [s allocator_active]"
+ puts "frag [s allocator_frag_ratio]"
+ puts "frag_bytes [s allocator_frag_bytes]"
+ }
+ assert_morethan [s allocator_frag_ratio] 1.4
+
+ catch {r config set activedefrag yes} e
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+
+ # wait for the active defrag to start working (decision once a second)
+ wait_for_condition 50 100 {
+ [s active_defrag_running] ne 0
+ } else {
+ fail "defrag not started."
+ }
+
+ # wait for the active defrag to stop working
+ wait_for_condition 500 100 {
+ [s active_defrag_running] eq 0
+ } else {
+ after 120 ;# serverCron only updates the info once in 100ms
+ puts [r info memory]
+ puts [r memory malloc-stats]
+ fail "defrag didn't stop."
+ }
+
+ # test the fragmentation is lower
+ after 120 ;# serverCron only updates the info once in 100ms
+ if {$::verbose} {
+ puts "used [s allocator_allocated]"
+ puts "rss [s allocator_active]"
+ puts "frag [s allocator_frag_ratio]"
+ puts "frag_bytes [s allocator_frag_bytes]"
+ }
+ assert_lessthan_equal [s allocator_frag_ratio] 1.05
+ }
+ # Flush all scripts to make sure we don't crash after defragging them
+ r script flush sync
+ } {OK}
+
+ test "Active defrag big keys" {
+ r flushdb
+ r config resetstat
+ r config set hz 100
+ r config set activedefrag no
+ r config set active-defrag-max-scan-fields 1000
+ r config set active-defrag-threshold-lower 5
+ r config set active-defrag-cycle-min 65
+ r config set active-defrag-cycle-max 75
+ r config set active-defrag-ignore-bytes 2mb
+ r config set maxmemory 0
+ r config set list-max-ziplist-size 5 ;# list of 10k items will have 2000 quicklist nodes
+ r config set stream-node-max-entries 5
+ r hmset hash h1 v1 h2 v2 h3 v3
+ r lpush list a b c d
+ r zadd zset 0 a 1 b 2 c 3 d
+ r sadd set a b c d
+ r xadd stream * item 1 value a
+ r xadd stream * item 2 value b
+ r xgroup create stream mygroup 0
+ r xreadgroup GROUP mygroup Alice COUNT 1 STREAMS stream >
+
+ # create big keys with 10k items
+ set rd [redis_deferring_client]
+ for {set j 0} {$j < 10000} {incr j} {
+ $rd hset bighash $j [concat "asdfasdfasdf" $j]
+ $rd lpush biglist [concat "asdfasdfasdf" $j]
+ $rd zadd bigzset $j [concat "asdfasdfasdf" $j]
+ $rd sadd bigset [concat "asdfasdfasdf" $j]
+ $rd xadd bigstream * item 1 value a
+ }
+ for {set j 0} {$j < 50000} {incr j} {
+ $rd read ; # Discard replies
+ }
+
+ set expected_frag 1.7
+ if {$::accurate} {
+ # scale the hash to 1m fields in order to have a measurable latency
+ for {set j 10000} {$j < 1000000} {incr j} {
+ $rd hset bighash $j [concat "asdfasdfasdf" $j]
+ }
+ for {set j 10000} {$j < 1000000} {incr j} {
+ $rd read ; # Discard replies
+ }
+ # creating that big hash increased used_memory, so the relative frag goes down
+ set expected_frag 1.3
+ }
+
+ # add a mass of string keys
+ for {set j 0} {$j < 500000} {incr j} {
+ $rd setrange $j 150 a
+ }
+ for {set j 0} {$j < 500000} {incr j} {
+ $rd read ; # Discard replies
+ }
+ assert_equal [r dbsize] 500010
+
+ # create some fragmentation
+ for {set j 0} {$j < 500000} {incr j 2} {
+ $rd del $j
+ }
+ for {set j 0} {$j < 500000} {incr j 2} {
+ $rd read ; # Discard replies
+ }
+ assert_equal [r dbsize] 250010
+
+ # start defrag
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ if {$::verbose} {
+ puts "frag $frag"
+ }
+ assert {$frag >= $expected_frag}
+ r config set latency-monitor-threshold 5
+ r latency reset
+
+ set digest [debug_digest]
+ catch {r config set activedefrag yes} e
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+ # wait for the active defrag to start working (decision once a second)
+ wait_for_condition 50 100 {
+ [s active_defrag_running] ne 0
+ } else {
+ fail "defrag not started."
+ }
+
+ # wait for the active defrag to stop working
+ wait_for_condition 500 100 {
+ [s active_defrag_running] eq 0
+ } else {
+ after 120 ;# serverCron only updates the info once in 100ms
+ puts [r info memory]
+ puts [r memory malloc-stats]
+ fail "defrag didn't stop."
+ }
+
+ # test the fragmentation is lower
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ set max_latency 0
+ foreach event [r latency latest] {
+ lassign $event eventname time latency max
+ if {$eventname == "active-defrag-cycle"} {
+ set max_latency $max
+ }
+ }
+ if {$::verbose} {
+ puts "frag $frag"
+ set misses [s active_defrag_misses]
+ set hits [s active_defrag_hits]
+ puts "hits: $hits"
+ puts "misses: $misses"
+ puts "max latency $max_latency"
+ puts [r latency latest]
+ puts [r latency history active-defrag-cycle]
+ }
+ assert {$frag < 1.1}
+ # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
+ # we expect max latency to be not much higher than 7.5ms, but due to rare slowness the threshold is set higher
+ if {!$::no_latency} {
+ assert {$max_latency <= 30}
+ }
+ }
+ # verify the data isn't corrupted or changed
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ r save ;# saving an rdb iterates over all the data / pointers
+ } {OK}
+
+ test "Active defrag big list" {
+ r flushdb
+ r config resetstat
+ r config set hz 100
+ r config set activedefrag no
+ r config set active-defrag-max-scan-fields 1000
+ r config set active-defrag-threshold-lower 5
+ r config set active-defrag-cycle-min 65
+ r config set active-defrag-cycle-max 75
+ r config set active-defrag-ignore-bytes 2mb
+ r config set maxmemory 0
+ r config set list-max-ziplist-size 5 ;# list of 500k items will have 100k quicklist nodes
+
+ # prepare a deferring client to build two big lists
+ set rd [redis_deferring_client]
+
+ set expected_frag 1.7
+ # add a mass of list nodes to two lists (allocations are interlaced)
+ set val [string repeat A 100] ;# 5 items of 100 bytes put us in the 640-byte bin, which has 32 regs, so high potential for fragmentation
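+ # (assuming jemalloc's default 4KB pages, a 640-byte slab spans 5 pages: 20480 / 640 = 32 regions)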
+ set elements 500000
+ for {set j 0} {$j < $elements} {incr j} {
+ $rd lpush biglist1 $val
+ $rd lpush biglist2 $val
+ }
+ for {set j 0} {$j < $elements} {incr j} {
+ $rd read ; # Discard replies
+ $rd read ; # Discard replies
+ }
+
+ # create some fragmentation
+ r del biglist2
+
+ # start defrag
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ if {$::verbose} {
+ puts "frag $frag"
+ }
+
+ assert {$frag >= $expected_frag}
+ r config set latency-monitor-threshold 5
+ r latency reset
+
+ set digest [debug_digest]
+ catch {r config set activedefrag yes} e
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+ # wait for the active defrag to start working (decision once a second)
+ wait_for_condition 50 100 {
+ [s active_defrag_running] ne 0
+ } else {
+ fail "defrag not started."
+ }
+
+ # wait for the active defrag to stop working
+ wait_for_condition 500 100 {
+ [s active_defrag_running] eq 0
+ } else {
+ after 120 ;# serverCron only updates the info once in 100ms
+ puts [r info memory]
+ puts [r info stats]
+ puts [r memory malloc-stats]
+ fail "defrag didn't stop."
+ }
+
+ # test the fragmentation is lower
+ after 120 ;# serverCron only updates the info once in 100ms
+ set misses [s active_defrag_misses]
+ set hits [s active_defrag_hits]
+ set frag [s allocator_frag_ratio]
+ set max_latency 0
+ foreach event [r latency latest] {
+ lassign $event eventname time latency max
+ if {$eventname == "active-defrag-cycle"} {
+ set max_latency $max
+ }
+ }
+ if {$::verbose} {
+ puts "frag $frag"
+ puts "misses: $misses"
+ puts "hits: $hits"
+ puts "max latency $max_latency"
+ puts [r latency latest]
+ puts [r latency history active-defrag-cycle]
+ }
+ assert {$frag < 1.1}
+ # due to high fragmentation, 100hz, and active-defrag-cycle-max set to 75,
+ # we expect max latency to be not much higher than 7.5ms, but due to rare slowness the threshold is set higher
+ if {!$::no_latency} {
+ assert {$max_latency <= 30}
+ }
+
+ # in extreme cases of stagnation, we see over 20m misses before the test aborts with "defrag didn't stop";
+ # in normal cases we only see 100k misses out of 500k elements
+ assert {$misses < $elements}
+ }
+ # verify the data isn't corrupted or changed
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ r save ;# saving an rdb iterates over all the data / pointers
+ r del biglist1 ;# coverage for quicklistBookmarksClear
+ } {1}
+
+ test "Active defrag edge case" {
+ # there was an edge case in defrag where all the slabs of a certain bin had exactly the same
+ # % utilization, with the exception of the current slab from which new allocations are made.
+ # if the current slab is lower in utilization, the defragger would have ended up in stagnation:
+ # it kept running without moving any allocation.
+ # this test is more consistent on a fresh server with no history
+ start_server {tags {"defrag"} overrides {save ""}} {
+ r flushdb
+ r config resetstat
+ r config set hz 100
+ r config set activedefrag no
+ r config set active-defrag-max-scan-fields 1000
+ r config set active-defrag-threshold-lower 5
+ r config set active-defrag-cycle-min 65
+ r config set active-defrag-cycle-max 75
+ r config set active-defrag-ignore-bytes 1mb
+ r config set maxmemory 0
+ set expected_frag 1.3
+
+ r debug mallctl-str thread.tcache.flush VOID
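+ # (thread.tcache.flush drains the thread's allocation cache so frees go straight back to the slabs)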
+ # fill the first slab, containing 32 regs of 640 bytes.
+ for {set j 0} {$j < 32} {incr j} {
+ r setrange "_$j" 600 x
+ r debug mallctl-str thread.tcache.flush VOID
+ }
+
+ # add a mass of keys with 600-byte values, filling the 640-byte bin which has 32 regs per slab.
+ set rd [redis_deferring_client]
+ set keys 640000
+ for {set j 0} {$j < $keys} {incr j} {
+ $rd setrange $j 600 x
+ }
+ for {set j 0} {$j < $keys} {incr j} {
+ $rd read ; # Discard replies
+ }
+
+ # create roughly 50% fragmentation
+ set sent 0
+ for {set j 0} {$j < $keys} {incr j 1} {
+ $rd del $j
+ incr sent
+ incr j 1
+ }
+ for {set j 0} {$j < $sent} {incr j} {
+ $rd read ; # Discard replies
+ }
+
+ # create higher fragmentation in the first slab
+ for {set j 10} {$j < 32} {incr j} {
+ r del "_$j"
+ }
+
+ # start defrag
+ after 120 ;# serverCron only updates the info once in 100ms
+ set frag [s allocator_frag_ratio]
+ if {$::verbose} {
+ puts "frag $frag"
+ }
+
+ assert {$frag >= $expected_frag}
+
+ set digest [debug_digest]
+ catch {r config set activedefrag yes} e
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+ # wait for the active defrag to start working (decision once a second)
+ wait_for_condition 50 100 {
+ [s active_defrag_running] ne 0
+ } else {
+ fail "defrag not started."
+ }
+
+ # wait for the active defrag to stop working
+ wait_for_condition 500 100 {
+ [s active_defrag_running] eq 0
+ } else {
+ after 120 ;# serverCron only updates the info once in 100ms
+ puts [r info memory]
+ puts [r info stats]
+ puts [r memory malloc-stats]
+ fail "defrag didn't stop."
+ }
+
+ # test the fragmentation is lower
+ after 120 ;# serverCron only updates the info once in 100ms
+ set misses [s active_defrag_misses]
+ set hits [s active_defrag_hits]
+ set frag [s allocator_frag_ratio]
+ if {$::verbose} {
+ puts "frag $frag"
+ puts "hits: $hits"
+ puts "misses: $misses"
+ }
+ assert {$frag < 1.1}
+ assert {$misses < 10000000} ;# when defrag doesn't stop, we have some 30m misses, when it does, we have 2m misses
+ }
+
+ # verify the data isn't corrupted or changed
+ set newdigest [debug_digest]
+ assert {$digest eq $newdigest}
+ r save ;# saving an rdb iterates over all the data / pointers
+ }
+ }
+ }
+}
+} ;# run_solo
diff --git a/tests/unit/moduleapi/aclcheck.tcl b/tests/unit/moduleapi/aclcheck.tcl
new file mode 100644
index 0000000..ae3f671
--- /dev/null
+++ b/tests/unit/moduleapi/aclcheck.tcl
@@ -0,0 +1,137 @@
+set testmodule [file normalize tests/modules/aclcheck.so]
+
+start_server {tags {"modules acl"}} {
+ r module load $testmodule
+
+ test {test module check acl for command perm} {
+ # by default all commands are allowed
+ assert_equal [r aclcheck.rm_call.check.cmd set x 5] OK
+ # block SET command for user
+ r acl setuser default -set
+ catch {r aclcheck.rm_call.check.cmd set x 5} e
+ assert_match {*DENIED CMD*} $e
+
+ # verify that a new log entry was added
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry username] eq {default}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry object] eq {set}}
+ assert {[dict get $entry reason] eq {command}}
+ }
+
+ test {test module check acl for key perm} {
+ # give permission for SET and block all keys but x (READ+WRITE), y (WRITE), z (READ)
+ r acl setuser default +set resetkeys ~x %W~y %R~z
+
+ assert_equal [r aclcheck.set.check.key "*" x 5] OK
+ catch {r aclcheck.set.check.key "*" v 5} e
+ assert_match "*DENIED KEY*" $e
+
+ assert_equal [r aclcheck.set.check.key "~" x 5] OK
+ assert_equal [r aclcheck.set.check.key "~" y 5] OK
+ assert_equal [r aclcheck.set.check.key "~" z 5] OK
+ catch {r aclcheck.set.check.key "~" v 5} e
+ assert_match "*DENIED KEY*" $e
+
+ assert_equal [r aclcheck.set.check.key "W" y 5] OK
+ catch {r aclcheck.set.check.key "W" v 5} e
+ assert_match "*DENIED KEY*" $e
+
+ assert_equal [r aclcheck.set.check.key "R" z 5] OK
+ catch {r aclcheck.set.check.key "R" v 5} e
+ assert_match "*DENIED KEY*" $e
+ }
+
+ test {test module check acl for module user} {
+ # the module user has access to all keys
+ assert_equal [r aclcheck.rm_call.check.cmd.module.user set y 5] OK
+ }
+
+ test {test module check acl for channel perm} {
+ # block all channels but ch1
+ r acl setuser default resetchannels &ch1
+ assert_equal [r aclcheck.publish.check.channel ch1 msg] 0
+ catch {r aclcheck.publish.check.channel ch2 msg} e
+ set e
+ } {*DENIED CHANNEL*}
+
+ test {test module check acl in rm_call} {
+ # rm call check for key permission (x: READ + WRITE)
+ assert_equal [r aclcheck.rm_call set x 5] OK
+ assert_equal [r aclcheck.rm_call set x 6 get] 5
+
+ # rm call check for key permission (y: only WRITE)
+ assert_equal [r aclcheck.rm_call set y 5] OK
+ assert_error {*NOPERM*} {r aclcheck.rm_call set y 5 get}
+ assert_error {*NOPERM*No permissions to access a key*} {r aclcheck.rm_call_with_errors set y 5 get}
+
+ # rm call check for key permission (z: only READ)
+ assert_error {*NOPERM*} {r aclcheck.rm_call set z 5}
+ catch {r aclcheck.rm_call_with_errors set z 5} e
+ assert_match {*NOPERM*No permissions to access a key*} $e
+ assert_error {*NOPERM*} {r aclcheck.rm_call set z 6 get}
+ assert_error {*NOPERM*No permissions to access a key*} {r aclcheck.rm_call_with_errors set z 6 get}
+
+ # verify that a new log entry was added
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry username] eq {default}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry object] eq {z}}
+ assert {[dict get $entry reason] eq {key}}
+
+ # rm call check for command permission
+ r acl setuser default -set
+ assert_error {*NOPERM*} {r aclcheck.rm_call set x 5}
+ assert_error {*NOPERM*has no permissions to run the 'set' command*} {r aclcheck.rm_call_with_errors set x 5}
+
+ # verify that a new log entry was added
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry username] eq {default}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry object] eq {set}}
+ assert {[dict get $entry reason] eq {command}}
+ }
+
+ test {test blocking of Commands outside of OnLoad} {
+ assert_equal [r block.commands.outside.onload] OK
+ }
+
+ test {test that users have access to module commands having acl categories} {
+ r acl SETUSER j1 on >password -@all +@WRITE
+ r acl SETUSER j2 on >password -@all +@READ
+ assert_equal [r acl DRYRUN j1 aclcheck.module.command.aclcategories.write] OK
+ assert_equal [r acl DRYRUN j2 aclcheck.module.command.aclcategories.write.function.read.category] OK
+ assert_equal [r acl DRYRUN j2 aclcheck.module.command.aclcategories.read.only.category] OK
+ }
+
+ test {test that existing users have access to module commands loaded at runtime} {
+ assert_equal [r module unload aclcheck] OK
+ r acl SETUSER j3 on >password -@all +@WRITE
+ assert_equal [r module load $testmodule] OK
+ assert_equal [r acl DRYRUN j3 aclcheck.module.command.aclcategories.write] OK
+ }
+
+ test {test that existing users without permissions do not have access to module commands loaded at runtime} {
+ assert_equal [r module unload aclcheck] OK
+ r acl SETUSER j4 on >password -@all +@READ
+ r acl SETUSER j5 on >password -@all +@WRITE
+ assert_equal [r module load $testmodule] OK
+ catch {r acl DRYRUN j4 aclcheck.module.command.aclcategories.write} e
+ assert_equal {User j4 has no permissions to run the 'aclcheck.module.command.aclcategories.write' command} $e
+ catch {r acl DRYRUN j5 aclcheck.module.command.aclcategories.write.function.read.category} e
+ assert_equal {User j5 has no permissions to run the 'aclcheck.module.command.aclcategories.write.function.read.category' command} $e
+ }
+
+ test {test that users without permissions do not have access to module commands} {
+ r acl SETUSER j6 on >password -@all +@READ
+ catch {r acl DRYRUN j6 aclcheck.module.command.aclcategories.write} e
+ assert_equal {User j6 has no permissions to run the 'aclcheck.module.command.aclcategories.write' command} $e
+ r acl SETUSER j7 on >password -@all +@WRITE
+ catch {r acl DRYRUN j7 aclcheck.module.command.aclcategories.write.function.read.category} e
+ assert_equal {User j7 has no permissions to run the 'aclcheck.module.command.aclcategories.write.function.read.category' command} $e
+ }
+
+ test "Unload the module - aclcheck" {
+ assert_equal {OK} [r module unload aclcheck]
+ }
+}
diff --git a/tests/unit/moduleapi/async_rm_call.tcl b/tests/unit/moduleapi/async_rm_call.tcl
new file mode 100644
index 0000000..1bf12de
--- /dev/null
+++ b/tests/unit/moduleapi/async_rm_call.tcl
@@ -0,0 +1,437 @@
+set testmodule [file normalize tests/modules/blockedclient.so]
+set testmodule2 [file normalize tests/modules/postnotifications.so]
+set testmodule3 [file normalize tests/modules/blockonkeys.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Locked GIL acquisition from async RM_Call} {
+ assert_equal {OK} [r do_rm_call_async acquire_gil]
+ }
+
+ test "Blpop on async RM_Call fire and forget" {
+ assert_equal {Blocked} [r do_rm_call_fire_and_forget blpop l 0]
+ r lpush l a
+ assert_equal {0} [r llen l]
+ }
+
+ test "Blpop on threaded async RM_Call" {
+ set rd [redis_deferring_client]
+
+ $rd do_rm_call_async_on_thread blpop l 0
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {l a}
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ foreach cmd {do_rm_call_async do_rm_call_async_script_mode } {
+
+ test "Blpop on async RM_Call using $cmd" {
+ set rd [redis_deferring_client]
+
+ $rd $cmd blpop l 0
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {l a}
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test "Brpop on async RM_Call using $cmd" {
+ set rd [redis_deferring_client]
+
+ $rd $cmd brpop l 0
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {l a}
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test "Brpoplpush on async RM_Call using $cmd" {
+ set rd [redis_deferring_client]
+
+ $rd $cmd brpoplpush l1 l2 0
+ wait_for_blocked_clients_count 1
+ r lpush l1 a
+ assert_equal [$rd read] {a}
+ wait_for_blocked_clients_count 0
+ $rd close
+ r lpop l2
+ } {a}
+
+ test "Blmove on async RM_Call using $cmd" {
+ set rd [redis_deferring_client]
+
+ $rd $cmd blmove l1 l2 LEFT LEFT 0
+ wait_for_blocked_clients_count 1
+ r lpush l1 a
+ assert_equal [$rd read] {a}
+ wait_for_blocked_clients_count 0
+ $rd close
+ r lpop l2
+ } {a}
+
+ test "Bzpopmin on async RM_Call using $cmd" {
+ set rd [redis_deferring_client]
+
+ $rd $cmd bzpopmin s 0
+ wait_for_blocked_clients_count 1
+ r zadd s 10 foo
+ assert_equal [$rd read] {s foo 10}
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test "Bzpopmax on async RM_Call using $cmd" {
+ set rd [redis_deferring_client]
+
+ $rd $cmd bzpopmax s 0
+ wait_for_blocked_clients_count 1
+ r zadd s 10 foo
+ assert_equal [$rd read] {s foo 10}
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+ }
+
+ test {Nested async RM_Call} {
+ set rd [redis_deferring_client]
+
+ $rd do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {l a}
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test {Test multiple async RM_Call waiting on the same event} {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ $rd1 do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0
+ $rd2 do_rm_call_async do_rm_call_async do_rm_call_async do_rm_call_async blpop l 0
+ wait_for_blocked_clients_count 2
+ r lpush l element element
+ assert_equal [$rd1 read] {l element}
+ assert_equal [$rd2 read] {l element}
+ wait_for_blocked_clients_count 0
+ $rd1 close
+ $rd2 close
+ }
+
+ test {async RM_Call calls RM_Call} {
+ assert_equal {PONG} [r do_rm_call_async do_rm_call ping]
+ }
+
+ test {async RM_Call calls background RM_Call calls RM_Call} {
+ assert_equal {PONG} [r do_rm_call_async do_bg_rm_call do_rm_call ping]
+ }
+
+ test {async RM_Call calls background RM_Call calls RM_Call calls async RM_Call} {
+ assert_equal {PONG} [r do_rm_call_async do_bg_rm_call do_rm_call do_rm_call_async ping]
+ }
+
+ test {async RM_Call inside async RM_Call callback} {
+ set rd [redis_deferring_client]
+ $rd wait_and_do_rm_call blpop l 0
+ wait_for_blocked_clients_count 1
+
+ start_server {} {
+ test "Connect a replica to the master instance" {
+ r slaveof [srv -1 host] [srv -1 port]
+ wait_for_condition 50 100 {
+ [s role] eq {slave} &&
+ [string match {*master_link_status:up*} [r info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+
+ assert_equal {1} [r -1 lpush l a]
+ assert_equal [$rd read] {l a}
+ }
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test {Become a replica while an async RM_Call is running} {
+ r flushall
+ set rd [redis_deferring_client]
+ $rd do_rm_call_async blpop l 0
+ wait_for_blocked_clients_count 1
+
+ # become a replica of a non-existent Redis instance
+ r replicaof localhost 30000
+
+ catch {$rd read} e
+ assert_match {UNBLOCKED force unblock from blocking operation*} $e
+ wait_for_blocked_clients_count 0
+
+ r replicaof no one
+
+ r lpush l 1
+ # make sure the async rm_call was aborted
+ assert_equal [r llen l] {1}
+ $rd close
+ }
+
+ test {Pipeline with blocking RM_Call} {
+ r flushall
+ set rd [redis_deferring_client]
+ set buf ""
+ append buf "do_rm_call_async blpop l 0\r\n"
+ append buf "ping\r\n"
+ $rd write $buf
+ $rd flush
+ wait_for_blocked_clients_count 1
+
+ # release the blocked client
+ r lpush l 1
+
+ assert_equal [$rd read] {l 1}
+ assert_equal [$rd read] {PONG}
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test {blocking RM_Call abort} {
+ r flushall
+ set rd [redis_deferring_client]
+
+ $rd client id
+ set client_id [$rd read]
+
+ $rd do_rm_call_async blpop l 0
+ wait_for_blocked_clients_count 1
+
+ r client kill ID $client_id
+ assert_error {*error reading reply*} {$rd read}
+
+ wait_for_blocked_clients_count 0
+
+ r lpush l 1
+ # make sure the async rm_call was aborted
+ assert_equal [r llen l] {1}
+ $rd close
+ }
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Test basic replication stream on unblock handler} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ set rd [redis_deferring_client]
+
+ $rd do_rm_call_async blpop l 0
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {l a}
+
+ assert_replication_stream $repl {
+ {select *}
+ {lpush l a}
+ {lpop l}
+ }
+ close_replication_stream $repl
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test {Test unblock handlers are executed as a unit} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ set rd [redis_deferring_client]
+
+ $rd blpop_and_set_multiple_keys l x 1 y 2
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {OK}
+
+ assert_replication_stream $repl {
+ {select *}
+ {lpush l a}
+ {multi}
+ {lpop l}
+ {set x 1}
+ {set y 2}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test {Test no propagation of blocking command} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ set rd [redis_deferring_client]
+
+ $rd do_rm_call_async_no_replicate blpop l 0
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {l a}
+
+ # make sure the lpop is not replicated
+ r set x 1
+
+ assert_replication_stream $repl {
+ {select *}
+ {lpush l a}
+ {set x 1}
+ }
+ close_replication_stream $repl
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+ r module load $testmodule2
+
+ test {Test unblock handlers are executed as a unit with key space notifications} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ set rd [redis_deferring_client]
+
+ $rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {OK}
+
+ # Explanation of the multi exec block:
+ # {lpop l} - pop the value by our blocking command 'blpop_and_set_multiple_keys'
+ # {set string_foo 1} - the action of our blocking command 'blpop_and_set_multiple_keys'
+ # {set string_bar 2} - the action of our blocking command 'blpop_and_set_multiple_keys'
+ # {incr string_changed{string_foo}} - post notification job that was registered when 'string_foo' changed
+ # {incr string_changed{string_bar}} - post notification job that was registered when 'string_bar' changed
+ # {incr string_total} - post notification job that was registered when string_changed{string_foo} changed
+ # {incr string_total} - post notification job that was registered when string_changed{string_bar} changed
+ assert_replication_stream $repl {
+ {select *}
+ {lpush l a}
+ {multi}
+ {lpop l}
+ {set string_foo 1}
+ {set string_bar 2}
+ {incr string_changed{string_foo}}
+ {incr string_changed{string_bar}}
+ {incr string_total}
+ {incr string_total}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+
+ test {Test unblock handlers are executed as a unit with lazy expire} {
+ r flushall
+ r DEBUG SET-ACTIVE-EXPIRE 0
+ set repl [attach_to_replication_stream]
+
+ set rd [redis_deferring_client]
+
+ $rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {OK}
+
+ # set expiration on string_foo
+ r pexpire string_foo 1
+ after 10
+
+ # now the key should have been expired
+ $rd blpop_and_set_multiple_keys l string_foo 1 string_bar 2
+ wait_for_blocked_clients_count 1
+ r lpush l a
+ assert_equal [$rd read] {OK}
+
+ # Explanation of the first multi exec block:
+ # {lpop l} - pop the value by our blocking command 'blpop_and_set_multiple_keys'
+ # {set string_foo 1} - the action of our blocking command 'blpop_and_set_multiple_keys'
+ # {set string_bar 2} - the action of our blocking command 'blpop_and_set_multiple_keys'
+ # {incr string_changed{string_foo}} - post notification job that was registered when 'string_foo' changed
+ # {incr string_changed{string_bar}} - post notification job that was registered when 'string_bar' changed
+ # {incr string_total} - post notification job that was registered when string_changed{string_foo} changed
+ # {incr string_total} - post notification job that was registered when string_changed{string_bar} changed
+ #
+ # Explanation of the second multi exec block:
+ # {lpop l} - pop the value by our blocking command 'blpop_and_set_multiple_keys'
+ # {del string_foo} - lazy expiration of string_foo when 'blpop_and_set_multiple_keys' tries to write to it.
+ # {set string_foo 1} - the action of our blocking command 'blpop_and_set_multiple_keys'
+ # {set string_bar 2} - the action of our blocking command 'blpop_and_set_multiple_keys'
+ # {incr expired} - the post notification job, registered after string_foo got expired
+ # {incr string_changed{string_foo}} - post notification job triggered when we set string_foo
+ # {incr string_changed{string_bar}} - post notification job triggered when we set string_bar
+ # {incr string_total} - post notification job triggered when we incr 'string_changed{string_foo}'
+ # {incr string_total} - post notification job triggered when we incr 'string_changed{string_bar}'
+ assert_replication_stream $repl {
+ {select *}
+ {lpush l a}
+ {multi}
+ {lpop l}
+ {set string_foo 1}
+ {set string_bar 2}
+ {incr string_changed{string_foo}}
+ {incr string_changed{string_bar}}
+ {incr string_total}
+ {incr string_total}
+ {exec}
+ {pexpireat string_foo *}
+ {lpush l a}
+ {multi}
+ {lpop l}
+ {del string_foo}
+ {set string_foo 1}
+ {set string_bar 2}
+ {incr expired}
+ {incr string_changed{string_foo}}
+ {incr string_changed{string_bar}}
+ {incr string_total}
+ {incr string_total}
+ {exec}
+ }
+ close_replication_stream $repl
+ r DEBUG SET-ACTIVE-EXPIRE 1
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+ r module load $testmodule3
+
+ test {Test unblock handler on module blocked on keys} {
+ set rd [redis_deferring_client]
+
+ r fsl.push l 1
+ $rd do_rm_call_async FSL.BPOPGT l 3 0
+ wait_for_blocked_clients_count 1
+ r fsl.push l 2
+ r fsl.push l 3
+ r fsl.push l 4
+ assert_equal [$rd read] {4}
+
+ wait_for_blocked_clients_count 0
+ $rd close
+ }
+}
diff --git a/tests/unit/moduleapi/auth.tcl b/tests/unit/moduleapi/auth.tcl
new file mode 100644
index 0000000..c7c2def
--- /dev/null
+++ b/tests/unit/moduleapi/auth.tcl
@@ -0,0 +1,90 @@
+set testmodule [file normalize tests/modules/auth.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Modules can create a user that can be authenticated} {
+ # Make sure we start authenticated with default user
+ r auth default ""
+ assert_equal [r acl whoami] "default"
+ r auth.createmoduleuser
+
+ set id [r auth.authmoduleuser]
+ assert_equal [r client id] $id
+
+ # Verify returned id is the same as our current id and
+ # we are authenticated with the specified user
+ assert_equal [r acl whoami] "global"
+ }
+
+ test {De-authenticating clients is tracked and kills clients} {
+ assert_equal [r auth.changecount] 0
+ r auth.createmoduleuser
+
+ # Catch the I/O exception that was thrown when Redis
+ # disconnected us.
+ catch { r ping } e
+ assert_match {*I/O*} $e
+
+ # Check that a user change was registered
+ assert_equal [r auth.changecount] 1
+ }
+
+ test {Modules can't authenticate with ACL users that don't exist} {
+ catch { r auth.authrealuser auth-module-test-fake } e
+ assert_match {*Invalid user*} $e
+ }
+
+ test {Modules can authenticate with ACL users} {
+ assert_equal [r acl whoami] "default"
+
+ # Create user to auth into
+ r acl setuser auth-module-test on allkeys allcommands
+
+ set id [r auth.authrealuser auth-module-test]
+
+ # Verify returned id is the same as our current id and
+ # we are authenticated with the specified user
+ assert_equal [r client id] $id
+ assert_equal [r acl whoami] "auth-module-test"
+ }
+
+ test {Client callback is called on user switch} {
+ assert_equal [r auth.changecount] 0
+
+ # Auth again and validate change count
+ r auth.authrealuser auth-module-test
+ assert_equal [r auth.changecount] 1
+
+ # Re-auth with the default user
+ r auth default ""
+ assert_equal [r auth.changecount] 1
+ assert_equal [r acl whoami] "default"
+
+ # Re-auth with the default user again, to
+ # verify the callback isn't fired again
+ r auth default ""
+ assert_equal [r auth.changecount] 0
+ assert_equal [r acl whoami] "default"
+ }
+
+ test {Modules can redact arguments} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ r auth.redact 1 2 3 4
+ r auth.redact 1 2 3
+ r config set slowlog-log-slower-than -1
+ set slowlog_resp [r slowlog get]
+
+ # There will be 3 records, slowlog reset and the
+ # two auth redact calls.
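+ # (each entry returned by SLOWLOG GET is a list; index 3 holds the command and its arguments)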
+ assert_equal 3 [llength $slowlog_resp]
+ assert_equal {slowlog reset} [lindex [lindex $slowlog_resp 2] 3]
+ assert_equal {auth.redact 1 (redacted) 3 (redacted)} [lindex [lindex $slowlog_resp 1] 3]
+ assert_equal {auth.redact (redacted) 2 (redacted)} [lindex [lindex $slowlog_resp 0] 3]
+ }
+
+ test "Unload the module - testacl" {
+ assert_equal {OK} [r module unload testacl]
+ }
+}
diff --git a/tests/unit/moduleapi/basics.tcl b/tests/unit/moduleapi/basics.tcl
new file mode 100644
index 0000000..042e347
--- /dev/null
+++ b/tests/unit/moduleapi/basics.tcl
@@ -0,0 +1,46 @@
+set testmodule [file normalize tests/modules/basics.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {test module api basics} {
+ r test.basics
+ } {ALL TESTS PASSED}
+
+ test {test rm_call auto mode} {
+ r hello 2
+ set reply [r test.rmcallautomode]
+ assert_equal [lindex $reply 0] f1
+ assert_equal [lindex $reply 1] v1
+ assert_equal [lindex $reply 2] f2
+ assert_equal [lindex $reply 3] v2
+ r hello 3
+ set reply [r test.rmcallautomode]
+ assert_equal [dict get $reply f1] v1
+ assert_equal [dict get $reply f2] v2
+ }
+
+ test {test get resp} {
+ foreach resp {3 2} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$resp == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$resp == 2} {continue}
+ }
+ r hello $resp
+ set reply [r test.getresp]
+ assert_equal $reply $resp
+ r hello 2
+ }
+ }
+
+ test "Unload the module - test" {
+ assert_equal {OK} [r module unload test]
+ }
+}
+
+start_server {tags {"modules external:skip"} overrides {enable-module-command no}} {
+ test {module command disabled} {
+ assert_error "ERR *MODULE command not allowed*" {r module load $testmodule}
+ }
+}
diff --git a/tests/unit/moduleapi/blockedclient.tcl b/tests/unit/moduleapi/blockedclient.tcl
new file mode 100644
index 0000000..9d475eb
--- /dev/null
+++ b/tests/unit/moduleapi/blockedclient.tcl
@@ -0,0 +1,287 @@
+set testmodule [file normalize tests/modules/blockedclient.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Locked GIL acquisition} {
+ assert_match "OK" [r acquire_gil]
+ }
+
+ test {Locked GIL acquisition during multi} {
+ r multi
+ r acquire_gil
+ assert_equal {{Blocked client is not supported inside multi}} [r exec]
+ }
+
+ test {Locked GIL acquisition from RM_Call} {
+ assert_equal {Blocked client is not allowed} [r do_rm_call acquire_gil]
+ }
+
+ test {Blocking commands don't block the client on RM_Call} {
+ r lpush l test
+ assert_equal [r do_rm_call blpop l 0] {l test}
+
+ r lpush l test
+ assert_equal [r do_rm_call brpop l 0] {l test}
+
+ r lpush l1 test
+ assert_equal [r do_rm_call brpoplpush l1 l2 0] {test}
+ assert_equal [r do_rm_call brpop l2 0] {l2 test}
+
+ r lpush l1 test
+ assert_equal [r do_rm_call blmove l1 l2 LEFT LEFT 0] {test}
+ assert_equal [r do_rm_call brpop l2 0] {l2 test}
+
+ r ZADD zset1 0 a 1 b 2 c
+ assert_equal [r do_rm_call bzpopmin zset1 0] {zset1 a 0}
+ assert_equal [r do_rm_call bzpopmax zset1 0] {zset1 c 2}
+
+ r xgroup create s g $ MKSTREAM
+ r xadd s * foo bar
+ assert {[r do_rm_call xread BLOCK 0 STREAMS s 0-0] ne {}}
+ assert {[r do_rm_call xreadgroup group g c BLOCK 0 STREAMS s >] ne {}}
+
+ assert {[r do_rm_call blpop empty_list 0] eq {}}
+ assert {[r do_rm_call brpop empty_list 0] eq {}}
+ assert {[r do_rm_call brpoplpush empty_list1 empty_list2 0] eq {}}
+ assert {[r do_rm_call blmove empty_list1 empty_list2 LEFT LEFT 0] eq {}}
+
+ assert {[r do_rm_call bzpopmin empty_zset 0] eq {}}
+ assert {[r do_rm_call bzpopmax empty_zset 0] eq {}}
+
+ r xgroup create empty_stream g $ MKSTREAM
+ assert {[r do_rm_call xread BLOCK 0 STREAMS empty_stream $] eq {}}
+ assert {[r do_rm_call xreadgroup group g c BLOCK 0 STREAMS empty_stream >] eq {}}
+
+ }
+
+ test {Monitor is disallowed inside RM_Call} {
+ set e {}
+ catch {
+ r do_rm_call monitor
+ } e
+ set e
+ } {*ERR*DENY BLOCKING*}
+
+ test {subscribe is disallowed inside RM_Call} {
+ set e {}
+ catch {
+ r do_rm_call subscribe x
+ } e
+ set e
+ } {*ERR*DENY BLOCKING*}
+
+ test {RM_Call from blocked client} {
+ r hset hash foo bar
+ r do_bg_rm_call hgetall hash
+ } {foo bar}
+
+ test {RM_Call from blocked client with script mode} {
+ r do_bg_rm_call_format S hset k foo bar
+ } {1}
+
+ test {RM_Call from blocked client with oom mode} {
+ r config set maxmemory 1
+ # will set server.pre_command_oom_state to 1
+ assert_error {OOM command not allowed*} {r hset hash foo bar}
+ r config set maxmemory 0
+        # now it should be OK to call OOM commands
+ r do_bg_rm_call_format M hset k1 foo bar
+ } {1} {needs:config-maxmemory}
+
+ test {RESP version carries through to blocked client} {
+ for {set client_proto 2} {$client_proto <= 3} {incr client_proto} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$client_proto == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$client_proto == 2} {continue}
+ }
+ r hello $client_proto
+ r readraw 1
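+            # readraw exposes the raw protocol bytes: RESP2 encodes the module's
+            # boolean reply as the integer :1, while RESP3 uses the native boolean #t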
+ set ret [r do_fake_bg_true]
+ if {$client_proto == 2} {
+ assert_equal $ret {:1}
+ } else {
+ assert_equal $ret "#t"
+ }
+ r readraw 0
+ r hello 2
+ }
+ }
+
+foreach call_type {nested normal} {
+ test "Busy module command - $call_type" {
+ set busy_time_limit 50
+ set old_time_limit [lindex [r config get busy-reply-threshold] 1]
+ r config set busy-reply-threshold $busy_time_limit
+ set rd [redis_deferring_client]
+
+ # run command that blocks until released
+ set start [clock clicks -milliseconds]
+ if {$call_type == "nested"} {
+ $rd do_rm_call slow_fg_command 0
+ } else {
+ $rd slow_fg_command 0
+ }
+ $rd flush
+
+ # send another command after the blocked one, to make sure we don't attempt to process it
+ $rd ping
+ $rd flush
+
+ # make sure we get BUSY error, and that we didn't get it too early
+ assert_error {*BUSY Slow module operation*} {r ping}
+ assert_morethan_equal [expr [clock clicks -milliseconds]-$start] $busy_time_limit
+
+ # abort the blocking operation
+ r stop_slow_fg_command
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 0
+ } else {
+ fail "Failed waiting for busy command to end"
+ }
+ assert_equal [$rd read] "1"
+ assert_equal [$rd read] "PONG"
+
+ # run command that blocks for 200ms
+ set start [clock clicks -milliseconds]
+ if {$call_type == "nested"} {
+ $rd do_rm_call slow_fg_command 200000
+ } else {
+ $rd slow_fg_command 200000
+ }
+ $rd flush
+ after 10 ;# try to make sure redis started running the command before we proceed
+
+        # make sure we didn't get a BUSY error; the command simply blocked until it was done
+ r ping
+ assert_morethan_equal [expr [clock clicks -milliseconds]-$start] 200
+ $rd read
+
+ $rd close
+ r config set busy-reply-threshold $old_time_limit
+ }
+}
+
+ test {RM_Call from blocked client} {
+ set busy_time_limit 50
+ set old_time_limit [lindex [r config get busy-reply-threshold] 1]
+ r config set busy-reply-threshold $busy_time_limit
+
+ # trigger slow operation
+ r set_slow_bg_operation 1
+ r hset hash foo bar
+ set rd [redis_deferring_client]
+ set start [clock clicks -milliseconds]
+ $rd do_bg_rm_call hgetall hash
+
+ # send another command after the blocked one, to make sure we don't attempt to process it
+ $rd ping
+ $rd flush
+
+ # wait till we know we're blocked inside the module
+ wait_for_condition 50 100 {
+ [r is_in_slow_bg_operation] eq 1
+ } else {
+ fail "Failed waiting for slow operation to start"
+ }
+
+ # make sure we get BUSY error, and that we didn't get here too early
+ assert_error {*BUSY Slow module operation*} {r ping}
+ assert_morethan [expr [clock clicks -milliseconds]-$start] $busy_time_limit
+ # abort the blocking operation
+ r set_slow_bg_operation 0
+
+ wait_for_condition 50 100 {
+ [r is_in_slow_bg_operation] eq 0
+ } else {
+ fail "Failed waiting for slow operation to stop"
+ }
+ assert_equal [r ping] {PONG}
+
+ r config set busy-reply-threshold $old_time_limit
+ assert_equal [$rd read] {foo bar}
+ assert_equal [$rd read] {PONG}
+ $rd close
+ }
+
+ test {blocked client reaches client output buffer limit} {
+ r hset hash big [string repeat x 50000]
+ r hset hash bada [string repeat x 50000]
+ r hset hash boom [string repeat x 50000]
+ r config set client-output-buffer-limit {normal 100000 0 0}
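+        # the hgetall reply (~150k across three fields) exceeds the 100k hard
+        # limit configured for normal clients, so the server closes the blocked
+        # client's connection mid-reply and the read surfaces as an I/O error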
+ r client setname myclient
+ catch {r do_bg_rm_call hgetall hash} e
+ assert_match "*I/O error*" $e
+ reconnect
+ set clients [r client list]
+ assert_no_match "*name=myclient*" $clients
+ }
+
+ test {module client error stats} {
+ r config resetstat
+
+ # simple module command that replies with string error
+ assert_error "ERR unknown command 'hgetalllll', with args beginning with:" {r do_rm_call hgetalllll}
+ assert_equal [errorrstat ERR r] {count=1}
+
+        # simple module command that propagates a subcommand error
+ assert_error "ERR unknown subcommand 'bla'. Try CONFIG HELP." {r do_rm_call config bla}
+ assert_equal [errorrstat ERR r] {count=2}
+
+ # module command that replies with string error from bg thread
+ assert_error "NULL reply returned" {r do_bg_rm_call hgetalllll}
+ assert_equal [errorrstat NULL r] {count=1}
+
+ # module command that returns an arity error
+ r do_rm_call set x x
+ assert_error "ERR wrong number of arguments for 'do_rm_call' command" {r do_rm_call}
+ assert_equal [errorrstat ERR r] {count=3}
+
+ # RM_Call that propagates an error
+ assert_error "WRONGTYPE*" {r do_rm_call hgetall x}
+ assert_equal [errorrstat WRONGTYPE r] {count=1}
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdrstat hgetall r]
+
+ # RM_Call from bg thread that propagates an error
+ assert_error "WRONGTYPE*" {r do_bg_rm_call hgetall x}
+ assert_equal [errorrstat WRONGTYPE r] {count=2}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=2} [cmdrstat hgetall r]
+
+ assert_equal [s total_error_replies] 6
+ assert_match {*calls=5,*,rejected_calls=0,failed_calls=4} [cmdrstat do_rm_call r]
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=2} [cmdrstat do_bg_rm_call r]
+ }
+
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+
+ # Start the replication process...
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+
+ test {WAIT command on module blocked client} {
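+            # while the replica process is paused it cannot acknowledge the write
+            # performed from the background RM_Call, so WAIT times out with 0 acks;
+            # once resumed, the write propagates and WAIT reports one replica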
+ pause_process [srv 0 pid]
+
+ $master do_bg_rm_call_format ! hset bk1 foo bar
+
+ assert_equal [$master wait 1 1000] 0
+ resume_process [srv 0 pid]
+ assert_equal [$master wait 1 1000] 1
+ assert_equal [$replica hget bk1 foo] bar
+ }
+ }
+
+ test {Unblock by timer} {
+ assert_match "OK" [r unblock_by_timer 100]
+ }
+
+ test "Unload the module - blockedclient" {
+ assert_equal {OK} [r module unload blockedclient]
+ }
+}
diff --git a/tests/unit/moduleapi/blockonbackground.tcl b/tests/unit/moduleapi/blockonbackground.tcl
new file mode 100644
index 0000000..fcd7f1d
--- /dev/null
+++ b/tests/unit/moduleapi/blockonbackground.tcl
@@ -0,0 +1,126 @@
+set testmodule [file normalize tests/modules/blockonbackground.so]
+
+source tests/support/util.tcl
+
+proc latency_percentiles_usec {cmd} {
+ return [latencyrstat_percentiles $cmd r]
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test { blocked clients time tracking - check blocked command that uses RedisModule_BlockedClientMeasureTimeStart() is tracking background time} {
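+        # block.debug blocks the client and sleeps in a background thread; since
+        # the module calls RedisModule_BlockedClientMeasureTimeStart(), that
+        # background time is charged to the command and must show up in the
+        # slowlog and in the latency-tracking percentiles below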
+ r slowlog reset
+ r config set slowlog-log-slower-than 200000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r block.debug 0 10000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r config resetstat
+ r config set latency-tracking yes
+ r config set latency-tracking-info-percentiles "50.0"
+ r block.debug 200 10000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 1
+ }
+
+ set cmdstatline [cmdrstat block.debug r]
+ set latencystatline_debug [latency_percentiles_usec block.debug]
+
+ regexp "calls=1,usec=(.*?),usec_per_call=(.*?),rejected_calls=0,failed_calls=0" $cmdstatline -> usec usec_per_call
+ regexp "p50=(.+\..+)" $latencystatline_debug -> p50
+ assert {$usec >= 100000}
+ assert {$usec_per_call >= 100000}
+ assert {$p50 >= 100000}
+ }
+
+ test { blocked clients time tracking - check blocked command that uses RedisModule_BlockedClientMeasureTimeStart() is tracking background time even in timeout } {
+ r slowlog reset
+ r config set slowlog-log-slower-than 200000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r block.debug 0 20000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r config resetstat
+ r block.debug 20000 500
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 1
+ }
+
+ set cmdstatline [cmdrstat block.debug r]
+
+ regexp "calls=1,usec=(.*?),usec_per_call=(.*?),rejected_calls=0,failed_calls=0" $cmdstatline usec usec_per_call
+ assert {$usec >= 250000}
+ assert {$usec_per_call >= 250000}
+ }
+
+ test { blocked clients time tracking - check blocked command with multiple calls RedisModule_BlockedClientMeasureTimeStart() is tracking the total background time } {
+ r slowlog reset
+ r config set slowlog-log-slower-than 200000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r block.double_debug 0
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r config resetstat
+ r block.double_debug 100
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 1
+ }
+ set cmdstatline [cmdrstat block.double_debug r]
+
+ regexp "calls=1,usec=(.*?),usec_per_call=(.*?),rejected_calls=0,failed_calls=0" $cmdstatline usec usec_per_call
+ assert {$usec >= 60000}
+ assert {$usec_per_call >= 60000}
+ }
+
+ test { blocked clients time tracking - check blocked command without calling RedisModule_BlockedClientMeasureTimeStart() is not reporting background time } {
+ r slowlog reset
+ r config set slowlog-log-slower-than 200000
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ r block.debug_no_track 200 1000
+ # ensure slowlog is still empty
+ if {!$::no_latency} {
+ assert_equal [r slowlog len] 0
+ }
+ }
+
+ test "client unblock works only for modules with timeout support" {
+ set rd [redis_deferring_client]
+ $rd client id
+ set id [$rd read]
+
+ # Block with a timeout function - may unblock
+ $rd block.block 20000
+ wait_for_condition 50 100 {
+ [r block.is_blocked] == 1
+ } else {
+ fail "Module did not block"
+ }
+
+ assert_equal 1 [r client unblock $id]
+ assert_match {*Timed out*} [$rd read]
+
+ # Block without a timeout function - cannot unblock
+ $rd block.block 0
+ wait_for_condition 50 100 {
+ [r block.is_blocked] == 1
+ } else {
+ fail "Module did not block"
+ }
+
+ assert_equal 0 [r client unblock $id]
+ assert_equal "OK" [r block.release foobar]
+ assert_equal "foobar" [$rd read]
+ }
+}
diff --git a/tests/unit/moduleapi/blockonkeys.tcl b/tests/unit/moduleapi/blockonkeys.tcl
new file mode 100644
index 0000000..66a94dc
--- /dev/null
+++ b/tests/unit/moduleapi/blockonkeys.tcl
@@ -0,0 +1,366 @@
+set testmodule [file normalize tests/modules/blockonkeys.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test "Module client blocked on keys: Circular BPOPPUSH" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ r del src dst
+
+ $rd1 fsl.bpoppush src dst 0
+ wait_for_blocked_clients_count 1
+
+ $rd2 fsl.bpoppush dst src 0
+ wait_for_blocked_clients_count 2
+
+ r fsl.push src 42
+ wait_for_blocked_clients_count 0
+
+ assert_equal {42} [r fsl.getall src]
+ assert_equal {} [r fsl.getall dst]
+ }
+
+ test "Module client blocked on keys: Self-referential BPOPPUSH" {
+ set rd1 [redis_deferring_client]
+
+ r del src
+
+ $rd1 fsl.bpoppush src src 0
+ wait_for_blocked_clients_count 1
+ r fsl.push src 42
+
+ assert_equal {42} [r fsl.getall src]
+ }
+
+ test "Module client blocked on keys: BPOPPUSH unblocked by timer" {
+ set rd1 [redis_deferring_client]
+
+ r del src dst
+
+ set repl [attach_to_replication_stream]
+
+ $rd1 fsl.bpoppush src dst 0
+ wait_for_blocked_clients_count 1
+
+ r fsl.pushtimer src 9000 10
+ wait_for_blocked_clients_count 0
+
+ assert_equal {9000} [r fsl.getall dst]
+ assert_equal {} [r fsl.getall src]
+
+ assert_replication_stream $repl {
+ {select *}
+ {fsl.push src 9000}
+ {fsl.bpoppush src dst 0}
+ }
+
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {Module client blocked on keys (no metadata): No block} {
+ r del k
+ r fsl.push k 33
+ r fsl.push k 34
+ r fsl.bpop k 0
+ } {34}
+
+ test {Module client blocked on keys (no metadata): Timeout} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd fsl.bpop k 1
+ assert_equal {Request timedout} [$rd read]
+ }
+
+ test {Module client blocked on keys (no metadata): Blocked} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd fsl.bpop k 0
+ wait_for_blocked_clients_count 1
+ r fsl.push k 34
+ assert_equal {34} [$rd read]
+ }
+
+ test {Module client blocked on keys (with metadata): No block} {
+ r del k
+ r fsl.push k 34
+ r fsl.bpopgt k 30 0
+ } {34}
+
+ test {Module client blocked on keys (with metadata): Timeout} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ r fsl.push k 33
+ $rd fsl.bpopgt k 35 1
+ assert_equal {Request timedout} [$rd read]
+ r client kill id $cid ;# try to smoke-out client-related memory leak
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, case 1} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ r fsl.push k 33
+ $rd fsl.bpopgt k 33 0
+ wait_for_blocked_clients_count 1
+ r fsl.push k 34
+ assert_equal {34} [$rd read]
+ r client kill id $cid ;# try to smoke-out client-related memory leak
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, case 2} {
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r fsl.push k 33
+ r fsl.push k 34
+ r fsl.push k 35
+ r fsl.push k 36
+ assert_equal {36} [$rd read]
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, DEL} {
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r del k
+ assert_error {*UNBLOCKED key no longer exists*} {$rd read}
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, FLUSHALL} {
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r flushall
+ assert_error {*UNBLOCKED key no longer exists*} {$rd read}
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, SWAPDB, no key} {
+ r select 9
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r swapdb 0 9
+ assert_error {*UNBLOCKED key no longer exists*} {$rd read}
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, SWAPDB, key exists, case 1} {
+        ;# Key exists in the other db, but with the wrong type
+ r flushall
+ r select 9
+ r fsl.push k 32
+ r select 0
+ r lpush k 38
+ r select 9
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r swapdb 0 9
+ assert_error {*UNBLOCKED key no longer exists*} {$rd read}
+ r select 9
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, SWAPDB, key exists, case 2} {
+        ;# Key exists in the other db with the right type, but the value doesn't allow unblocking
+ r flushall
+ r select 9
+ r fsl.push k 32
+ r select 0
+ r fsl.push k 34
+ r select 9
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r swapdb 0 9
+ assert_equal {1} [s 0 blocked_clients]
+ r fsl.push k 38
+ assert_equal {38} [$rd read]
+ r select 9
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, SWAPDB, key exists, case 3} {
+        ;# Key exists in the other db with the right type, and the value allows unblocking
+ r flushall
+ r select 9
+ r fsl.push k 32
+ r select 0
+ r fsl.push k 38
+ r select 9
+ set rd [redis_deferring_client]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r swapdb 0 9
+ assert_equal {38} [$rd read]
+ r select 9
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, CLIENT KILL} {
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r client kill id $cid ;# try to smoke-out client-related memory leak
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, CLIENT UNBLOCK TIMEOUT} {
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r client unblock $cid timeout ;# try to smoke-out client-related memory leak
+ assert_equal {Request timedout} [$rd read]
+ }
+
+ test {Module client blocked on keys (with metadata): Blocked, CLIENT UNBLOCK ERROR} {
+ r del k
+ r fsl.push k 32
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpopgt k 35 0
+ wait_for_blocked_clients_count 1
+ r client unblock $cid error ;# try to smoke-out client-related memory leak
+ assert_error "*unblocked*" {$rd read}
+ }
+
+ test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK TIMEOUT} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpop k 0 NO_TO_CB
+ wait_for_blocked_clients_count 1
+ assert_equal [r client unblock $cid timeout] {0}
+ $rd close
+ }
+
+ test {Module client blocked on keys, no timeout CB, CLIENT UNBLOCK ERROR} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+ $rd fsl.bpop k 0 NO_TO_CB
+ wait_for_blocked_clients_count 1
+ assert_equal [r client unblock $cid error] {0}
+ $rd close
+ }
+
+ test {Module client re-blocked on keys after woke up on wrong type} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd fsl.bpop k 0
+ wait_for_blocked_clients_count 1
+ r lpush k 12
+ r lpush k 13
+ r lpush k 14
+ r del k
+ r fsl.push k 34
+ assert_equal {34} [$rd read]
+ assert_equal {1} [r get fsl_wrong_type] ;# first lpush caused one wrong-type wake-up
+ }
+
+ test {Module client blocked on keys woken up by LPUSH} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd blockonkeys.popall k
+ wait_for_blocked_clients_count 1
+ r lpush k 42 squirrel banana
+ assert_equal {banana squirrel 42} [$rd read]
+ $rd close
+ }
+
+ test {Module client unblocks BLPOP} {
+ r del k
+ set rd [redis_deferring_client]
+ $rd blpop k 3
+ wait_for_blocked_clients_count 1
+ r blockonkeys.lpush k 42
+ assert_equal {k 42} [$rd read]
+ $rd close
+ }
+
+ test {Module unblocks module blocked on non-empty list} {
+ r del k
+ r lpush k aa
+ # Module client blocks to pop 5 elements from list
+ set rd [redis_deferring_client]
+ $rd blockonkeys.blpopn k 5
+ wait_for_blocked_clients_count 1
+ # Check that RM_SignalKeyAsReady() can wake up BLPOPN
+ r blockonkeys.lpush_unblock k bb cc ;# Not enough elements for BLPOPN
+ r lpush k dd ee ff ;# Doesn't unblock module
+ r blockonkeys.lpush_unblock k gg ;# Unblocks module
+ assert_equal {gg ff ee dd cc} [$rd read]
+ $rd close
+ }
+
+ test {Module explicit unblock when blocked on keys} {
+ r del k
+ r set somekey someval
+ # Module client blocks to pop 5 elements from list
+ set rd [redis_deferring_client]
+ $rd blockonkeys.blpopn_or_unblock k 5 0
+ wait_for_blocked_clients_count 1
+        # the lpush will now cause the module to attempt the pop, but instead it unblocks the client from the reply_callback
+ r lpush k dd
+        # we should still get unblocked, as the command should not be reprocessed
+ wait_for_blocked_clients_count 0
+ assert_equal {Action aborted} [$rd read]
+ $rd get somekey
+ assert_equal {someval} [$rd read]
+ $rd close
+ }
+
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+
+ # Start the replication process...
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+
+ test {WAIT command on module blocked client on keys} {
+ set rd [redis_deferring_client -1]
+ $rd set x y
+ $rd read
+
+ pause_process [srv 0 pid]
+
+ $master del k
+ $rd fsl.bpop k 0
+ wait_for_blocked_client -1
+ $master fsl.push k 34
+ $master fsl.push k 35
+ assert_equal {34} [$rd read]
+
+ assert_equal [$master wait 1 1000] 0
+ resume_process [srv 0 pid]
+ assert_equal [$master wait 1 1000] 1
+ $rd close
+ assert_equal {35} [$replica fsl.getall k]
+ }
+ }
+
+}
diff --git a/tests/unit/moduleapi/cluster.tcl b/tests/unit/moduleapi/cluster.tcl
new file mode 100644
index 0000000..8075083
--- /dev/null
+++ b/tests/unit/moduleapi/cluster.tcl
@@ -0,0 +1,222 @@
+# Primitive tests on cluster-enabled redis with modules
+
+source tests/support/cli.tcl
+
+# cluster creation is complicated with TLS, and the current tests don't really need that coverage
+tags {tls:skip external:skip cluster modules} {
+
+set testmodule_nokey [file normalize tests/modules/blockonbackground.so]
+set testmodule_blockedclient [file normalize tests/modules/blockedclient.so]
+set testmodule [file normalize tests/modules/blockonkeys.so]
+
+set modules [list loadmodule $testmodule loadmodule $testmodule_nokey loadmodule $testmodule_blockedclient]
+start_cluster 3 0 [list config_lines $modules] {
+
+ set node1 [srv 0 client]
+ set node2 [srv -1 client]
+ set node3 [srv -2 client]
+ set node3_pid [srv -2 pid]
+
+ test "Run blocking command (blocked on key) on cluster node3" {
+ # key9184688 is mapped to slot 10923 (first slot of node 3)
+ set node3_rd [redis_deferring_client -2]
+ $node3_rd fsl.bpop key9184688 0
+ $node3_rd flush
+ wait_for_condition 50 100 {
+ [s -2 blocked_clients] eq {1}
+ } else {
+ fail "Client executing blocking command (blocked on key) not blocked"
+ }
+ }
+
+ test "Run blocking command (no keys) on cluster node2" {
+ set node2_rd [redis_deferring_client -1]
+ $node2_rd block.block 0
+ $node2_rd flush
+
+ wait_for_condition 50 100 {
+ [s -1 blocked_clients] eq {1}
+ } else {
+ fail "Client executing blocking command (no keys) not blocked"
+ }
+ }
+
+
+ test "Perform a Resharding" {
+ exec src/redis-cli --cluster-yes --cluster reshard 127.0.0.1:[srv -2 port] \
+ --cluster-to [$node1 cluster myid] \
+ --cluster-from [$node3 cluster myid] \
+ --cluster-slots 1
+ }
+
+ test "Verify command (no keys) is unaffected after resharding" {
+        # verify the client is still blocked on node2
+ assert_equal [s -1 blocked_clients] {1}
+
+        # release the blocked client
+ $node2 block.release 0
+ }
+
+ test "Verify command (blocked on key) got unblocked after resharding" {
+        # this (read) will wait for node3 to realize the new topology
+ assert_error {*MOVED*} {$node3_rd read}
+
+ # verify there are no blocked clients
+ assert_equal [s 0 blocked_clients] {0}
+ assert_equal [s -1 blocked_clients] {0}
+ assert_equal [s -2 blocked_clients] {0}
+ }
+
+ test "Wait for cluster to be stable" {
+ wait_for_condition 1000 50 {
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv 0 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -1 port]}] == 0 &&
+ [catch {exec src/redis-cli --cluster check 127.0.0.1:[srv -2 port]}] == 0 &&
+ [CI 0 cluster_state] eq {ok} &&
+ [CI 1 cluster_state] eq {ok} &&
+ [CI 2 cluster_state] eq {ok}
+ } else {
+ fail "Cluster doesn't stabilize"
+ }
+ }
+
+ test "Sanity test push cmd after resharding" {
+ assert_error {*MOVED*} {$node3 fsl.push key9184688 1}
+
+ set node1_rd [redis_deferring_client 0]
+ $node1_rd fsl.bpop key9184688 0
+ $node1_rd flush
+
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ puts "Client not blocked"
+ puts "read from blocked client: [$node1_rd read]"
+ fail "Client not blocked"
+ }
+
+ $node1 fsl.push key9184688 2
+ assert_equal {2} [$node1_rd read]
+ }
+
+ $node1_rd close
+ $node2_rd close
+ $node3_rd close
+
+ test "Run blocking command (blocked on key) again on cluster node1" {
+ $node1 del key9184688
+ # key9184688 is mapped to slot 10923 which has been moved to node1
+ set node1_rd [redis_deferring_client 0]
+ $node1_rd fsl.bpop key9184688 0
+ $node1_rd flush
+
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Client executing blocking command (blocked on key) again not blocked"
+ }
+ }
+
+ test "Run blocking command (no keys) again on cluster node2" {
+ set node2_rd [redis_deferring_client -1]
+
+ $node2_rd block.block 0
+ $node2_rd flush
+
+ wait_for_condition 50 100 {
+ [s -1 blocked_clients] eq {1}
+ } else {
+ fail "Client executing blocking command (no keys) again not blocked"
+ }
+ }
+
+ test "Kill a cluster node and wait for fail state" {
+ # kill node3 in cluster
+ pause_process $node3_pid
+
+ wait_for_condition 1000 50 {
+ [CI 0 cluster_state] eq {fail} &&
+ [CI 1 cluster_state] eq {fail}
+ } else {
+ fail "Cluster doesn't fail"
+ }
+ }
+
+ test "Verify command (blocked on key) got unblocked after cluster failure" {
+ assert_error {*CLUSTERDOWN*} {$node1_rd read}
+ }
+
+ test "Verify command (no keys) got unblocked after cluster failure" {
+ assert_error {*CLUSTERDOWN*} {$node2_rd read}
+
+ # verify there are no blocked clients
+ assert_equal [s 0 blocked_clients] {0}
+ assert_equal [s -1 blocked_clients] {0}
+ }
+
+ test "Verify command RM_Call is rejected when cluster is down" {
+ assert_error "ERR Can not execute a command 'set' while the cluster is down" {$node1 do_rm_call set x 1}
+ }
+
+ resume_process $node3_pid
+ $node1_rd close
+ $node2_rd close
+}
+
+set modules [list loadmodule [file normalize tests/modules/keyspace_events.so]]
+start_cluster 2 2 [list config_lines $modules] {
+
+ set master1 [srv 0 client]
+ set master2 [srv -1 client]
+ set replica1 [srv -2 client]
+ set replica2 [srv -3 client]
+
+ test "Verify keys deletion and notification effects happened on cluster slots change are replicated inside multi exec" {
+ $master2 set count_dels_{4oi} 1
+ $master2 del count_dels_{4oi}
+ assert_equal 1 [$master2 keyspace.get_dels]
+ assert_equal 1 [$replica2 keyspace.get_dels]
+ $master2 set count_dels_{4oi} 1
+
+ set repl [attach_to_replication_stream_on_connection -3]
+
+ $master1 cluster bumpepoch
+ $master1 cluster setslot 16382 node [$master1 cluster myid]
+
+ wait_for_cluster_propagation
+ wait_for_condition 50 100 {
+ [$master2 keyspace.get_dels] eq 2
+ } else {
+ fail "master did not delete the key"
+ }
+ wait_for_condition 50 100 {
+ [$replica2 keyspace.get_dels] eq 2
+ } else {
+ fail "replica did not increase del counter"
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {del count_dels_{4oi}}
+ {keyspace.incr_dels}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+}
+
+}
+
+set testmodule [file normalize tests/modules/basics.so]
+set modules [list loadmodule $testmodule]
+start_cluster 3 0 [list config_lines $modules] {
+ set node1 [srv 0 client]
+ set node2 [srv -1 client]
+ set node3 [srv -2 client]
+
+ test "Verify RM_Call inside module load function on cluster mode" {
+ assert_equal {PONG} [$node1 PING]
+ assert_equal {PONG} [$node2 PING]
+ assert_equal {PONG} [$node3 PING]
+ }
+}
diff --git a/tests/unit/moduleapi/cmdintrospection.tcl b/tests/unit/moduleapi/cmdintrospection.tcl
new file mode 100644
index 0000000..6ba69a1
--- /dev/null
+++ b/tests/unit/moduleapi/cmdintrospection.tcl
@@ -0,0 +1,50 @@
+set testmodule [file normalize tests/modules/cmdintrospection.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+    # cmdintrospection.xadd mimics XADD with regard to
+    # what COMMAND exposes. There are two differences:
+ #
+ # 1. cmdintrospection.xadd (and all module commands) do not have ACL categories
+ # 2. cmdintrospection.xadd's `group` is "module"
+ #
+    # These tests verify that, apart from the above differences, the output of
+ # COMMAND INFO and COMMAND DOCS are identical for the two commands.
+ test "Module command introspection via COMMAND INFO" {
+ set redis_reply [lindex [r command info xadd] 0]
+ set module_reply [lindex [r command info cmdintrospection.xadd] 0]
+ for {set i 1} {$i < [llength $redis_reply]} {incr i} {
+ if {$i == 2} {
+ # Remove the "module" flag
+ set mylist [lindex $module_reply $i]
+ set idx [lsearch $mylist "module"]
+ set mylist [lreplace $mylist $idx $idx]
+ lset module_reply $i $mylist
+ }
+ if {$i == 6} {
+ # Skip ACL categories
+ continue
+ }
+ assert_equal [lindex $redis_reply $i] [lindex $module_reply $i]
+ }
+ }
+
+ test "Module command introspection via COMMAND DOCS" {
+ set redis_reply [dict create {*}[lindex [r command docs xadd] 1]]
+ set module_reply [dict create {*}[lindex [r command docs cmdintrospection.xadd] 1]]
+ # Compare the maps. We need to pop "group" first.
+ dict unset redis_reply group
+ dict unset module_reply group
+ dict unset module_reply module
+ if {$::log_req_res} {
+ dict unset redis_reply reply_schema
+ }
+
+ assert_equal $redis_reply $module_reply
+ }
+
+ test "Unload the module - cmdintrospection" {
+ assert_equal {OK} [r module unload cmdintrospection]
+ }
+}
diff --git a/tests/unit/moduleapi/commandfilter.tcl b/tests/unit/moduleapi/commandfilter.tcl
new file mode 100644
index 0000000..72b16ec
--- /dev/null
+++ b/tests/unit/moduleapi/commandfilter.tcl
@@ -0,0 +1,175 @@
+set testmodule [file normalize tests/modules/commandfilter.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule log-key 0
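+    # module arguments: the key the filter logs filtered commands to, and a
+    # flag that, when set to 1, registers the filter with
+    # REDISMODULE_CMDFILTER_NOSELF (see the reload with "log-key 1" below)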
+
+ test {Retain a command filter argument} {
+ # Retain an argument now. Later we'll try to re-read it and make sure
+ # it is not corrupt and that valgrind does not complain.
+ r rpush some-list @retain my-retained-string
+ r commandfilter.retained
+ } {my-retained-string}
+
+ test {Command Filter handles redirected commands} {
+ r set mykey @log
+ r lrange log-key 0 -1
+ } "{set mykey @log}"
+
+ test {Command Filter can call RedisModule_CommandFilterArgDelete} {
+ r rpush mylist elem1 @delme elem2
+ r lrange mylist 0 -1
+ } {elem1 elem2}
+
+ test {Command Filter can call RedisModule_CommandFilterArgInsert} {
+ r del mylist
+ r rpush mylist elem1 @insertbefore elem2 @insertafter elem3
+ r lrange mylist 0 -1
+ } {elem1 --inserted-before-- @insertbefore elem2 @insertafter --inserted-after-- elem3}
+
+ test {Command Filter can call RedisModule_CommandFilterArgReplace} {
+ r del mylist
+ r rpush mylist elem1 @replaceme elem2
+ r lrange mylist 0 -1
+ } {elem1 --replaced-- elem2}
+
+ test {Command Filter applies on RM_Call() commands} {
+ r del log-key
+ r commandfilter.ping
+ r lrange log-key 0 -1
+ } "{ping @log}"
+
+ test {Command Filter applies on Lua redis.call()} {
+ r del log-key
+ r eval "redis.call('ping', '@log')" 0
+ r lrange log-key 0 -1
+ } "{ping @log}"
+
+ test {Command Filter applies on Lua redis.call() that calls a module} {
+ r del log-key
+ r eval "redis.call('commandfilter.ping')" 0
+ r lrange log-key 0 -1
+ } "{ping @log}"
+
+ test {Command Filter strings can be retained} {
+ r commandfilter.retained
+ } {my-retained-string}
+
+ test {Command Filter is unregistered implicitly on module unload} {
+ r del log-key
+ r module unload commandfilter
+ r set mykey @log
+ r lrange log-key 0 -1
+ } {}
+
+ r module load $testmodule log-key 0
+
+ test {Command Filter unregister works as expected} {
+ # Validate reloading succeeded
+ r del log-key
+ r set mykey @log
+ assert_equal "{set mykey @log}" [r lrange log-key 0 -1]
+
+ # Unregister
+ r commandfilter.unregister
+ r del log-key
+
+ r set mykey @log
+ r lrange log-key 0 -1
+ } {}
+
+ r module unload commandfilter
+ r module load $testmodule log-key 1
+
+ test {Command Filter REDISMODULE_CMDFILTER_NOSELF works as expected} {
+ r set mykey @log
+ assert_equal "{set mykey @log}" [r lrange log-key 0 -1]
+
+ r del log-key
+ r commandfilter.ping
+ assert_equal {} [r lrange log-key 0 -1]
+
+ r eval "redis.call('commandfilter.ping')" 0
+ assert_equal {} [r lrange log-key 0 -1]
+ }
+
+ test "Unload the module - commandfilter" {
+ assert_equal {OK} [r module unload commandfilter]
+ }
+}
+
+test {RM_CommandFilterArgInsert and script argv caching} {
+ # coverage for scripts calling commands that expand the argv array
+ # an attempt to add coverage for a possible bug in luaArgsToRedisArgv
+ # this test needs a fresh server so that lua_argv_size is 0.
+ # glibc realloc can return the same pointer even when the size changes
+ # still this test isn't able to trigger the issue, but we keep it anyway.
+ start_server {tags {"modules"}} {
+ r module load $testmodule log-key 0
+ r del mylist
+ # command with 6 args
+ r eval {redis.call('rpush', KEYS[1], 'elem1', 'elem2', 'elem3', 'elem4')} 1 mylist
+ # command with 3 args that is changed to 4
+ r eval {redis.call('rpush', KEYS[1], '@insertafter')} 1 mylist
+ # command with 6 args again
+ r eval {redis.call('rpush', KEYS[1], 'elem1', 'elem2', 'elem3', 'elem4')} 1 mylist
+ assert_equal [r lrange mylist 0 -1] {elem1 elem2 elem3 elem4 @insertafter --inserted-after-- elem1 elem2 elem3 elem4}
+ }
+}
+
+# previously, there was a bug that command filters would be rerun (which would cause args to swap back)
+# this test is meant to protect against that bug
+test {Blocking Commands don't run through command filter when reprocessed} {
+ start_server {tags {"modules"}} {
+ r module load $testmodule log-key 0
+
+ r del list1{t}
+ r del list2{t}
+
+ r lpush list2{t} a b c d e
+
+ set rd [redis_deferring_client]
+        # we're asking to pop from the left, but the command filter swaps the two direction arguments.
+        # without the swap we would end up with e d c b a 5 (5 being the leftmost element of the following lpush);
+        # with the swap we end up with 1 e d c b a (1 being the rightmost element of it).
+        # if the command filter ran again on unblock, the arguments would be swapped back.
+ $rd blmove list1{t} list2{t} left right 0
+ wait_for_blocked_client
+ r lpush list1{t} 1 2 3 4 5
+ # validate that we moved the correct element with the swapped args
+ assert_equal [$rd read] 1
+ # validate that we moved the correct elements to the correct side of the list
+ assert_equal [r lpop list2{t}] 1
+
+ $rd close
+ }
+}
+
+test {Filtering based on client id} {
+ start_server {tags {"modules"}} {
+ r module load $testmodule log-key 0
+
+ set rr [redis_client]
+ set cid [$rr client id]
+ r unfilter_clientid $cid
+
+ r rpush mylist elem1 @replaceme elem2
+ assert_equal [r lrange mylist 0 -1] {elem1 --replaced-- elem2}
+
+ r del mylist
+
+ assert_equal [$rr rpush mylist elem1 @replaceme elem2] 3
+ assert_equal [r lrange mylist 0 -1] {elem1 @replaceme elem2}
+
+ $rr close
+ }
+}
+
+start_server {} {
+ test {OnLoad failure will handle un-registration} {
+ catch {r module load $testmodule log-key 0 noload}
+ r set mykey @log
+ assert_equal [r lrange log-key 0 -1] {}
+ r rpush mylist elem1 @delme elem2
+ assert_equal [r lrange mylist 0 -1] {elem1 @delme elem2}
+ }
+}
diff --git a/tests/unit/moduleapi/datatype.tcl b/tests/unit/moduleapi/datatype.tcl
new file mode 100644
index 0000000..951c060
--- /dev/null
+++ b/tests/unit/moduleapi/datatype.tcl
@@ -0,0 +1,134 @@
+set testmodule [file normalize tests/modules/datatype.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {DataType: Test module is sane, GET/SET work.} {
+ r datatype.set dtkey 100 stringval
+ assert {[r datatype.get dtkey] eq {100 stringval}}
+ }
+
+ test {test blocking of datatype creation outside of OnLoad} {
+ assert_equal [r block.create.datatype.outside.onload] OK
+ }
+
+ test {DataType: RM_SaveDataTypeToString(), RM_LoadDataTypeFromStringEncver() work} {
+ r datatype.set dtkey -1111 MyString
+ set encoded [r datatype.dump dtkey]
+
+ assert {[r datatype.restore dtkeycopy $encoded 4] eq {4}}
+ assert {[r datatype.get dtkeycopy] eq {-1111 MyString}}
+ }
+
+ test {DataType: Handle truncated RM_LoadDataTypeFromStringEncver()} {
+ r datatype.set dtkey -1111 MyString
+ set encoded [r datatype.dump dtkey]
+ set truncated [string range $encoded 0 end-1]
+
+ catch {r datatype.restore dtkeycopy $truncated 4} e
+ set e
+ } {*Invalid*}
+
+ test {DataType: ModuleTypeReplaceValue() happy path works} {
+ r datatype.set key-a 1 AAA
+ r datatype.set key-b 2 BBB
+
+ assert {[r datatype.swap key-a key-b] eq {OK}}
+ assert {[r datatype.get key-a] eq {2 BBB}}
+ assert {[r datatype.get key-b] eq {1 AAA}}
+ }
+
+ test {DataType: ModuleTypeReplaceValue() fails on non-module keys} {
+ r datatype.set key-a 1 AAA
+ r set key-b RedisString
+
+ catch {r datatype.swap key-a key-b} e
+ set e
+ } {*ERR*}
+
+ test {DataType: Copy command works for modules} {
+ # Test failed copies
+ r datatype.set answer-to-universe 42 AAA
+ catch {r copy answer-to-universe answer2} e
+ assert_match {*module key failed to copy*} $e
+
+ # Our module's data type copy function copies the int value as-is
+ # but appends /<from-key>/<to-key> to the string value so we can
+ # track passed arguments.
+ r datatype.set sourcekey 1234 AAA
+ r copy sourcekey targetkey
+ r datatype.get targetkey
+ } {1234 AAA/sourcekey/targetkey}
+
+ test {DataType: Slow Loading} {
+ r config set busy-reply-threshold 5000 ;# make sure we're using a high default
+ # trigger slow loading
+ r datatype.slow_loading 1
+ set rd [redis_deferring_client]
+ set start [clock clicks -milliseconds]
+ $rd debug reload
+
+ # wait till we know we're blocked inside the module
+ wait_for_condition 50 100 {
+ [r datatype.is_in_slow_loading] eq 1
+ } else {
+ fail "Failed waiting for slow loading to start"
+ }
+
+ # make sure we get LOADING error, and that we didn't get here late (not waiting for busy-reply-threshold)
+ assert_error {*LOADING*} {r ping}
+ assert_lessthan [expr [clock clicks -milliseconds]-$start] 2000
+
+ # abort the blocking operation
+ r datatype.slow_loading 0
+ wait_for_condition 50 100 {
+ [s loading] eq {0}
+ } else {
+ fail "Failed waiting for loading to end"
+ }
+ $rd read
+ $rd close
+ }
+
+ test {DataType: check the type name} {
+ r flushdb
+ r datatype.set foo 111 bar
+ assert_type test___dt foo
+ }
+
+ test {SCAN module datatype} {
+ r flushdb
+ populate 1000
+ r datatype.set foo 111 bar
+ set type [r type foo]
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type $type]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 1 [llength $keys]
+ }
+
+    test {SCAN module datatype type name matching is case-insensitive} {
+ r flushdb
+ populate 1000
+ r datatype.set foo 111 bar
+ set type "tEsT___dT"
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type $type]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 1 [llength $keys]
+ }
+}
diff --git a/tests/unit/moduleapi/datatype2.tcl b/tests/unit/moduleapi/datatype2.tcl
new file mode 100644
index 0000000..95acc9a
--- /dev/null
+++ b/tests/unit/moduleapi/datatype2.tcl
@@ -0,0 +1,232 @@
+set testmodule [file normalize tests/modules/datatype2.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test "datatype2: test mem alloc and free" {
+ r flushall
+
+ r select 0
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal 2 [r mem.alloc k2 2]
+
+ r select 1
+ assert_equal 1 [r mem.alloc k1 1]
+ assert_equal 5 [r mem.alloc k2 5]
+
+ r select 0
+ assert_equal 1 [r mem.free k1]
+ assert_equal 1 [r mem.free k2]
+
+ r select 1
+ assert_equal 1 [r mem.free k1]
+ assert_equal 1 [r mem.free k2]
+ }
+
+ test "datatype2: test del and unlink" {
+ r flushall
+
+ assert_equal 100 [r mem.alloc k1 100]
+ assert_equal 60 [r mem.alloc k2 60]
+
+ assert_equal 1 [r unlink k1]
+ assert_equal 1 [r del k2]
+ }
+
+ test "datatype2: test read and write" {
+ r flushall
+
+ assert_equal 3 [r mem.alloc k1 3]
+
+ set data datatype2
+ assert_equal [string length $data] [r mem.write k1 0 $data]
+ assert_equal $data [r mem.read k1 0]
+ }
+
+ test "datatype2: test rdb save and load" {
+ r flushall
+
+ r select 0
+ set data k1
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+
+ set data k2
+ assert_equal 2 [r mem.alloc k2 2]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+
+ r select 1
+ set data k3
+ assert_equal 3 [r mem.alloc k3 3]
+ assert_equal [string length $data] [r mem.write k3 1 $data]
+
+ set data k4
+ assert_equal 2 [r mem.alloc k4 2]
+ assert_equal [string length $data] [r mem.write k4 0 $data]
+
+ r bgsave
+ waitForBgsave r
+ r debug reload
+
+ r select 0
+ assert_equal k1 [r mem.read k1 1]
+ assert_equal k2 [r mem.read k2 0]
+
+ r select 1
+ assert_equal k3 [r mem.read k3 1]
+ assert_equal k4 [r mem.read k4 0]
+ }
+
+ test "datatype2: test aof rewrite" {
+ r flushall
+
+ r select 0
+ set data k1
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+
+ set data k2
+ assert_equal 2 [r mem.alloc k2 2]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+
+ r select 1
+ set data k3
+ assert_equal 3 [r mem.alloc k3 3]
+ assert_equal [string length $data] [r mem.write k3 1 $data]
+
+ set data k4
+ assert_equal 2 [r mem.alloc k4 2]
+ assert_equal [string length $data] [r mem.write k4 0 $data]
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+
+ r select 0
+ assert_equal k1 [r mem.read k1 1]
+ assert_equal k2 [r mem.read k2 0]
+
+ r select 1
+ assert_equal k3 [r mem.read k3 1]
+ assert_equal k4 [r mem.read k4 0]
+ }
+
+ test "datatype2: test copy" {
+ r flushall
+
+ r select 0
+ set data k1
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+ assert_equal $data [r mem.read k1 1]
+
+ set data k2
+ assert_equal 2 [r mem.alloc k2 2]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+ assert_equal $data [r mem.read k2 0]
+
+ r select 1
+ set data k3
+ assert_equal 3 [r mem.alloc k3 3]
+ assert_equal [string length $data] [r mem.write k3 1 $data]
+
+ set data k4
+ assert_equal 2 [r mem.alloc k4 2]
+ assert_equal [string length $data] [r mem.write k4 0 $data]
+
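+        # mem.usage reports {total <units allocated> used <live keys>}: db 0
+        # holds k1 (3) and k2 (2) for a total of 5 across 2 keys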
+ assert_equal {total 5 used 2} [r mem.usage 0]
+ assert_equal {total 5 used 2} [r mem.usage 1]
+
+ r select 0
+ assert_equal 1 [r copy k1 k3]
+ assert_equal k1 [r mem.read k3 1]
+ assert_equal {total 8 used 3} [r mem.usage 0]
+ assert_equal 1 [r copy k2 k1 db 1]
+
+ r select 1
+ assert_equal k2 [r mem.read k1 0]
+ assert_equal {total 8 used 3} [r mem.usage 0]
+ assert_equal {total 7 used 3} [r mem.usage 1]
+ }
+
+ test "datatype2: test swapdb" {
+ r flushall
+
+ r select 0
+ set data k1
+ assert_equal 5 [r mem.alloc k1 5]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+ assert_equal $data [r mem.read k1 1]
+
+ set data k2
+ assert_equal 4 [r mem.alloc k2 4]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+ assert_equal $data [r mem.read k2 0]
+
+ r select 1
+ set data k1
+ assert_equal 3 [r mem.alloc k3 3]
+ assert_equal [string length $data] [r mem.write k3 1 $data]
+
+ set data k2
+ assert_equal 2 [r mem.alloc k4 2]
+ assert_equal [string length $data] [r mem.write k4 0 $data]
+
+ assert_equal {total 9 used 2} [r mem.usage 0]
+ assert_equal {total 5 used 2} [r mem.usage 1]
+
+ assert_equal OK [r swapdb 0 1]
+ assert_equal {total 9 used 2} [r mem.usage 1]
+ assert_equal {total 5 used 2} [r mem.usage 0]
+ }
+
+ test "datatype2: test digest" {
+ r flushall
+
+ r select 0
+ set data k1
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+ assert_equal $data [r mem.read k1 1]
+
+ set data k2
+ assert_equal 2 [r mem.alloc k2 2]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+ assert_equal $data [r mem.read k2 0]
+
+ r select 1
+ set data k1
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+ assert_equal $data [r mem.read k1 1]
+
+ set data k2
+ assert_equal 2 [r mem.alloc k2 2]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+ assert_equal $data [r mem.read k2 0]
+
+ r select 0
+ set digest0 [debug_digest]
+
+ r select 1
+ set digest1 [debug_digest]
+
+ assert_equal $digest0 $digest1
+ }
+
+ test "datatype2: test memusage" {
+ r flushall
+
+ set data k1
+ assert_equal 3 [r mem.alloc k1 3]
+ assert_equal [string length $data] [r mem.write k1 1 $data]
+ assert_equal $data [r mem.read k1 1]
+
+ set data k2
+ assert_equal 3 [r mem.alloc k2 3]
+ assert_equal [string length $data] [r mem.write k2 0 $data]
+ assert_equal $data [r mem.read k2 0]
+
+ assert_equal [memory_usage k1] [memory_usage k2]
+ }
+} \ No newline at end of file
diff --git a/tests/unit/moduleapi/defrag.tcl b/tests/unit/moduleapi/defrag.tcl
new file mode 100644
index 0000000..b2e2396
--- /dev/null
+++ b/tests/unit/moduleapi/defrag.tcl
@@ -0,0 +1,46 @@
+set testmodule [file normalize tests/modules/defragtest.so]
+
+start_server {tags {"modules"} overrides {{save ""}}} {
+ r module load $testmodule 10000
+ r config set hz 100
+ r config set active-defrag-ignore-bytes 1
+ r config set active-defrag-threshold-lower 0
+ r config set active-defrag-cycle-min 99
+
+    # try to enable active defrag; it will fail if redis was compiled without it
+ catch {r config set activedefrag yes} e
+ if {[r config get activedefrag] eq "activedefrag yes"} {
+
+ test {Module defrag: simple key defrag works} {
+ r frag.create key1 1 1000 0
+
+ after 2000
+ set info [r info defragtest_stats]
+ assert {[getInfoProperty $info defragtest_datatype_attempts] > 0}
+ assert_equal 0 [getInfoProperty $info defragtest_datatype_resumes]
+ }
+
+ test {Module defrag: late defrag with cursor works} {
+ r flushdb
+ r frag.resetstats
+
+            # the key requires at least 10 defrag iterations
+            # due to maxstep
+ r frag.create key2 10000 100 1000
+
+ after 2000
+ set info [r info defragtest_stats]
+ assert {[getInfoProperty $info defragtest_datatype_resumes] > 10}
+ assert_equal 0 [getInfoProperty $info defragtest_datatype_wrong_cursor]
+ }
+
+ test {Module defrag: global defrag works} {
+ r flushdb
+ r frag.resetstats
+
+ after 2000
+ set info [r info defragtest_stats]
+ assert {[getInfoProperty $info defragtest_global_attempts] > 0}
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/eventloop.tcl b/tests/unit/moduleapi/eventloop.tcl
new file mode 100644
index 0000000..81e01ca
--- /dev/null
+++ b/tests/unit/moduleapi/eventloop.tcl
@@ -0,0 +1,28 @@
+set testmodule [file normalize tests/modules/eventloop.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test "Module eventloop sendbytes" {
+ assert_match "OK" [r test.sendbytes 5000000]
+ assert_match "OK" [r test.sendbytes 2000000]
+ }
+
+ test "Module eventloop iteration" {
+ set iteration [r test.iteration]
+ set next_iteration [r test.iteration]
+ assert {$next_iteration > $iteration}
+ }
+
+ test "Module eventloop sanity" {
+ r test.sanity
+ }
+
+ test "Module eventloop oneshot" {
+ r test.oneshot
+ }
+
+ test "Unload the module - eventloop" {
+ assert_equal {OK} [r module unload eventloop]
+ }
+}
diff --git a/tests/unit/moduleapi/fork.tcl b/tests/unit/moduleapi/fork.tcl
new file mode 100644
index 0000000..c89a6c5
--- /dev/null
+++ b/tests/unit/moduleapi/fork.tcl
@@ -0,0 +1,49 @@
+set testmodule [file normalize tests/modules/fork.so]
+
+proc count_log_message {pattern} {
+ set status [catch {exec grep -c $pattern < [srv 0 stdout]} result]
+ if {$status == 1} {
+ set result 0
+ }
+ return $result
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Module fork} {
+        # the first argument to fork.create is the exit code on termination
+ # the second argument to fork.create is passed to usleep
+ r fork.create 3 100000 ;# 100ms
+ wait_for_condition 20 100 {
+ [r fork.exitcode] != -1
+ } else {
+ fail "fork didn't terminate"
+ }
+ r fork.exitcode
+ } {3}
+
+ test {Module fork kill} {
+ # use a longer time to avoid the child exiting before being killed
+ r fork.create 3 100000000 ;# 100s
+ wait_for_condition 20 100 {
+ [count_log_message "fork child started"] == 2
+ } else {
+ fail "fork didn't start"
+ }
+
+ # module fork twice
+ assert_error {Fork failed} {r fork.create 0 1}
+ assert {[count_log_message "Can't fork for module: File exists"] eq "1"}
+
+ r fork.kill
+
+ assert {[count_log_message "Received SIGUSR1 in child"] eq "1"}
+        # check that it wasn't printed again (the print belongs to the previous test)
+ assert {[count_log_message "fork child exiting"] eq "1"}
+ }
+
+ test "Unload the module - fork" {
+ assert_equal {OK} [r module unload fork]
+ }
+}
diff --git a/tests/unit/moduleapi/getchannels.tcl b/tests/unit/moduleapi/getchannels.tcl
new file mode 100644
index 0000000..e8f557d
--- /dev/null
+++ b/tests/unit/moduleapi/getchannels.tcl
@@ -0,0 +1,40 @@
+set testmodule [file normalize tests/modules/getchannels.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+    # Channels are currently used just to validate ACLs, so test them here
+ r ACL setuser testuser +@all resetchannels &channel &pattern*
+
+ test "module getchannels-api with literals - ACL" {
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command subscribe literal channel subscribe literal pattern1]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command publish literal channel publish literal pattern1]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe literal channel unsubscribe literal pattern1]
+
+ assert_equal "This user has no permissions to access the 'nopattern1' channel" [r ACL DRYRUN testuser getchannels.command subscribe literal channel subscribe literal nopattern1]
+ assert_equal "This user has no permissions to access the 'nopattern1' channel" [r ACL DRYRUN testuser getchannels.command publish literal channel subscribe literal nopattern1]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe literal channel unsubscribe literal nopattern1]
+
+ assert_equal "This user has no permissions to access the 'otherchannel' channel" [r ACL DRYRUN testuser getchannels.command subscribe literal otherchannel subscribe literal pattern1]
+ assert_equal "This user has no permissions to access the 'otherchannel' channel" [r ACL DRYRUN testuser getchannels.command publish literal otherchannel subscribe literal pattern1]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe literal otherchannel unsubscribe literal pattern1]
+ }
+
+ test "module getchannels-api with patterns - ACL" {
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command subscribe pattern pattern*]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command publish pattern pattern*]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe pattern pattern*]
+
+ assert_equal "This user has no permissions to access the 'pattern1' channel" [r ACL DRYRUN testuser getchannels.command subscribe pattern pattern1 subscribe pattern pattern*]
+ assert_equal "This user has no permissions to access the 'pattern1' channel" [r ACL DRYRUN testuser getchannels.command publish pattern pattern1 subscribe pattern pattern*]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe pattern pattern1 unsubscribe pattern pattern*]
+
+ assert_equal "This user has no permissions to access the 'otherpattern*' channel" [r ACL DRYRUN testuser getchannels.command subscribe pattern otherpattern* subscribe pattern pattern*]
+ assert_equal "This user has no permissions to access the 'otherpattern*' channel" [r ACL DRYRUN testuser getchannels.command publish pattern otherpattern* subscribe pattern pattern*]
+ assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe pattern otherpattern* unsubscribe pattern pattern*]
+ }
+
+ test "Unload the module - getchannels" {
+ assert_equal {OK} [r module unload getchannels]
+ }
+}
diff --git a/tests/unit/moduleapi/getkeys.tcl b/tests/unit/moduleapi/getkeys.tcl
new file mode 100644
index 0000000..b84bb0f
--- /dev/null
+++ b/tests/unit/moduleapi/getkeys.tcl
@@ -0,0 +1,80 @@
+set testmodule [file normalize tests/modules/getkeys.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {COMMAND INFO correctly reports a movable keys module command} {
+ set info [lindex [r command info getkeys.command] 0]
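+
+        # a movable-keys command reports 0 for first key, last key and key step,
+        # since key positions cannot be derived from the command line alone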
+
+ assert_equal {module movablekeys} [lindex $info 2]
+ assert_equal {0} [lindex $info 3]
+ assert_equal {0} [lindex $info 4]
+ assert_equal {0} [lindex $info 5]
+ }
+
+ test {COMMAND GETKEYS correctly reports a movable keys module command} {
+ r command getkeys getkeys.command arg1 arg2 key key1 arg3 key key2 key key3
+ } {key1 key2 key3}
+
+ test {COMMAND GETKEYS correctly reports a movable keys module command using flags} {
+ r command getkeys getkeys.command_with_flags arg1 arg2 key key1 arg3 key key2 key key3
+ } {key1 key2 key3}
+
+ test {COMMAND GETKEYSANDFLAGS correctly reports a movable keys module command not using flags} {
+ r command getkeysandflags getkeys.command arg1 arg2 key key1 arg3 key key2
+ } {{key1 {RW access update}} {key2 {RW access update}}}
+
+ test {COMMAND GETKEYSANDFLAGS correctly reports a movable keys module command using flags} {
+ r command getkeysandflags getkeys.command_with_flags arg1 arg2 key key1 arg3 key key2 key key3
+ } {{key1 {RO access}} {key2 {RO access}} {key3 {RO access}}}
+
+ test {RM_GetCommandKeys on non-existing command} {
+ catch {r getkeys.introspect 0 non-command key1 key2} e
+ set _ $e
+ } {*ENOENT*}
+
+ test {RM_GetCommandKeys on built-in fixed keys command} {
+ r getkeys.introspect 0 set key1 value1
+ } {key1}
+
+ test {RM_GetCommandKeys on built-in fixed keys command with flags} {
+ r getkeys.introspect 1 set key1 value1
+ } {{key1 OW}}
+
+ test {RM_GetCommandKeys on EVAL} {
+ r getkeys.introspect 0 eval "" 4 key1 key2 key3 key4 arg1 arg2
+ } {key1 key2 key3 key4}
+
+ test {RM_GetCommandKeys on a movable keys module command} {
+ r getkeys.introspect 0 getkeys.command arg1 arg2 key key1 arg3 key key2 key key3
+ } {key1 key2 key3}
+
+ test {RM_GetCommandKeys on a non-movable module command} {
+ r getkeys.introspect 0 getkeys.fixed arg1 key1 key2 key3 arg2
+ } {key1 key2 key3}
+
+ test {RM_GetCommandKeys with bad arity} {
+ catch {r getkeys.introspect 0 set key} e
+ set _ $e
+ } {*EINVAL*}
+
+ # user that can only read from "read" keys, write to "write" keys, and read+write to "RW" keys
+ r ACL setuser testuser +@all %R~read* %W~write* %RW~rw*
+
+ test "module getkeys-api - ACL" {
+ # legacy triple didn't provide flags, so they require both read and write
+ assert_equal "OK" [r ACL DRYRUN testuser getkeys.command key rw]
+ assert_match {*has no permissions to access the 'read' key*} [r ACL DRYRUN testuser getkeys.command key read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN testuser getkeys.command key write]
+ }
+
+ test "module getkeys-api with flags - ACL" {
+ assert_equal "OK" [r ACL DRYRUN testuser getkeys.command_with_flags key rw]
+ assert_equal "OK" [r ACL DRYRUN testuser getkeys.command_with_flags key read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN testuser getkeys.command_with_flags key write]
+ }
+
+ test "Unload the module - getkeys" {
+ assert_equal {OK} [r module unload getkeys]
+ }
+}
diff --git a/tests/unit/moduleapi/hash.tcl b/tests/unit/moduleapi/hash.tcl
new file mode 100644
index 0000000..116b1c5
--- /dev/null
+++ b/tests/unit/moduleapi/hash.tcl
@@ -0,0 +1,27 @@
+set testmodule [file normalize tests/modules/hash.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Module hash set} {
+ r set k mystring
+ assert_error "WRONGTYPE*" {r hash.set k "" hello world}
+ r del k
+ # "" = count updates and deletes of existing fields only
+ assert_equal 0 [r hash.set k "" squirrel yes]
+ # "a" = COUNT_ALL = count inserted, modified and deleted fields
+ assert_equal 2 [r hash.set k "a" banana no sushi whynot]
+ # "n" = NX = only add fields not already existing in the hash
+ # "x" = XX = only replace the value for existing fields
+ assert_equal 0 [r hash.set k "n" squirrel hoho what nothing]
+ assert_equal 1 [r hash.set k "na" squirrel hoho something nice]
+ assert_equal 0 [r hash.set k "xa" new stuff not inserted]
+ assert_equal 1 [r hash.set k "x" squirrel ofcourse]
+ assert_equal 1 [r hash.set k "" sushi :delete: none :delete:]
+ r hgetall k
+ } {squirrel ofcourse banana no what nothing something nice}
+
+ test "Unload the module - hash" {
+ assert_equal {OK} [r module unload hash]
+ }
+}
diff --git a/tests/unit/moduleapi/hooks.tcl b/tests/unit/moduleapi/hooks.tcl
new file mode 100644
index 0000000..94b0f6f
--- /dev/null
+++ b/tests/unit/moduleapi/hooks.tcl
@@ -0,0 +1,321 @@
+set testmodule [file normalize tests/modules/hooks.so]
+
+tags "modules" {
+ start_server [list overrides [list loadmodule "$testmodule" appendonly yes]] {
+ test {Test module aof save on server start from empty} {
+ assert {[r hooks.event_count persistence-syncaof-start] == 1}
+ }
+
+ test {Test clients connection / disconnection hooks} {
+ for {set j 0} {$j < 2} {incr j} {
+ set rd1 [redis_deferring_client]
+ $rd1 close
+ }
+ assert {[r hooks.event_count client-connected] > 1}
+ assert {[r hooks.event_count client-disconnected] > 1}
+ }
+
+ test {Test module client change event for blocked client} {
+ set rd [redis_deferring_client]
+ # select db other than 0
+ $rd select 1
+ # block on key
+ $rd brpop foo 0
+ # kill blocked client
+ r client kill skipme yes
+ # assert server is still up
+ assert_equal [r ping] PONG
+ $rd close
+ }
+
+ test {Test module cron hook} {
+ after 100
+ assert {[r hooks.event_count cron-loop] > 0}
+ set hz [r hooks.event_last cron-loop]
+ assert_equal $hz 10
+ }
+
+ test {Test module loaded / unloaded hooks} {
+ set othermodule [file normalize tests/modules/infotest.so]
+ r module load $othermodule
+ r module unload infotest
+ assert_equal [r hooks.event_last module-loaded] "infotest"
+ assert_equal [r hooks.event_last module-unloaded] "infotest"
+ }
+
+ test {Test module aofrw hook} {
+ r debug populate 1000 foo 10000 ;# 10mb worth of data
+ r config set rdbcompression no ;# rdb progress is only checked once every 2mb
+ r BGREWRITEAOF
+ waitForBgrewriteaof r
+ assert_equal [string match {*module-event-persistence-aof-start*} [exec tail -20 < [srv 0 stdout]]] 1
+ assert_equal [string match {*module-event-persistence-end*} [exec tail -20 < [srv 0 stdout]]] 1
+ }
+
+ test {Test module aof load and rdb/aof progress hooks} {
+ # create some aof tail (progress is checked only once every 1000 commands)
+ for {set j 0} {$j < 4000} {incr j} {
+ r set "bar$j" x
+ }
+ # set some configs that will cause many loading progress events during aof loading
+ r config set key-load-delay 500
+ r config set dynamic-hz no
+ r config set hz 500
+ r DEBUG LOADAOF
+ assert_equal [r hooks.event_last loading-aof-start] 0
+ assert_equal [r hooks.event_last loading-end] 0
+ assert {[r hooks.event_count loading-rdb-start] == 0}
+ assert_lessthan 2 [r hooks.event_count loading-progress-rdb] ;# comes from the preamble section
+ assert_lessthan 2 [r hooks.event_count loading-progress-aof]
+ if {$::verbose} {
+ puts "rdb progress events [r hooks.event_count loading-progress-rdb]"
+ puts "aof progress events [r hooks.event_count loading-progress-aof]"
+ }
+ }
+ # undo configs before next test
+ r config set dynamic-hz yes
+ r config set key-load-delay 0
+
+ test {Test module rdb save hook} {
+ # debug reload does: save, flush, load:
+ assert {[r hooks.event_count persistence-syncrdb-start] == 0}
+ assert {[r hooks.event_count loading-rdb-start] == 0}
+ r debug reload
+ assert {[r hooks.event_count persistence-syncrdb-start] == 1}
+ assert {[r hooks.event_count loading-rdb-start] == 1}
+ }
+
+ test {Test key unlink hook} {
+ r set testkey1 hello
+ r del testkey1
+ assert {[r hooks.event_count key-info-testkey1] == 1}
+ assert_equal [r hooks.event_last key-info-testkey1] testkey1
+ r lpush testkey1 hello
+ r lpop testkey1
+ assert {[r hooks.event_count key-info-testkey1] == 2}
+ assert_equal [r hooks.event_last key-info-testkey1] testkey1
+ r set testkey2 world
+ r unlink testkey2
+ assert {[r hooks.event_count key-info-testkey2] == 1}
+ assert_equal [r hooks.event_last key-info-testkey2] testkey2
+ }
+
+ test {Test removed key event} {
+ r set str abcd
+ r set str abcde
+ # For the string type, the removed value is returned
+ assert_equal {abcd overwritten} [r hooks.is_key_removed str]
+ assert_equal -1 [r hooks.pexpireat str]
+
+ r del str
+ assert_equal {abcde deleted} [r hooks.is_key_removed str]
+ assert_equal -1 [r hooks.pexpireat str]
+
+ # test int encoded string
+ r set intstr 12345678
+ # incr doesn't fire event
+ r incr intstr
+ catch {[r hooks.is_key_removed intstr]} output
+ assert_match {ERR * removed} $output
+ r del intstr
+ assert_equal {12345679 deleted} [r hooks.is_key_removed intstr]
+
+ catch {[r hooks.is_key_removed not-exists]} output
+ assert_match {ERR * removed} $output
+
+ r hset hash f v
+ r hdel hash f
+ assert_equal {0 deleted} [r hooks.is_key_removed hash]
+
+ r hset hash f v a b
+ r del hash
+ assert_equal {2 deleted} [r hooks.is_key_removed hash]
+
+ r lpush list 1
+ r lpop list
+ assert_equal {0 deleted} [r hooks.is_key_removed list]
+
+ r lpush list 1 2 3
+ r del list
+ assert_equal {3 deleted} [r hooks.is_key_removed list]
+
+ r sadd set 1
+ r spop set
+ assert_equal {0 deleted} [r hooks.is_key_removed set]
+
+ r sadd set 1 2 3 4
+ r del set
+ assert_equal {4 deleted} [r hooks.is_key_removed set]
+
+ r zadd zset 1 f
+ r zpopmin zset
+ assert_equal {0 deleted} [r hooks.is_key_removed zset]
+
+ r zadd zset 1 f 2 d
+ r del zset
+ assert_equal {2 deleted} [r hooks.is_key_removed zset]
+
+ r xadd stream 1-1 f v
+ r xdel stream 1-1
+ # Streams do not delete the object when an entry is deleted
+ catch {[r hooks.is_key_removed stream]} output
+ assert_match {ERR * removed} $output
+ r del stream
+ assert_equal {0 deleted} [r hooks.is_key_removed stream]
+
+ r xadd stream 2-1 f v
+ r del stream
+ assert_equal {1 deleted} [r hooks.is_key_removed stream]
+
+ # delete key because of active expire
+ set size [r dbsize]
+ r set active-expire abcd px 1
+ # ensure active expire kicked in
+ wait_for_condition 50 100 {
+ [r dbsize] == $size
+ } else {
+ fail "Active expire not trigger"
+ }
+ assert_equal {abcd expired} [r hooks.is_key_removed active-expire]
+ # current time is greater than pexpireat
+ set now [r time]
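+ # TIME returns {seconds microseconds}; fold both into a millisecond timestamp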
+ set mill [expr ([lindex $now 0]*1000)+([lindex $now 1]/1000)]
+ assert {$mill >= [r hooks.pexpireat active-expire]}
+
+ # delete key because of lazy expire
+ r debug set-active-expire 0
+ r set lazy-expire abcd px 1
+ after 10
+ r get lazy-expire
+ assert_equal {abcd expired} [r hooks.is_key_removed lazy-expire]
+ set now [r time]
+ set mill [expr ([lindex $now 0]*1000)+([lindex $now 1]/1000)]
+ assert {$mill >= [r hooks.pexpireat lazy-expire]}
+ r debug set-active-expire 1
+
+ # delete key not yet expired
+ set now [r time]
+ set expireat [expr ([lindex $now 0]*1000)+([lindex $now 1]/1000)+1000000]
+ r set not-expire abcd pxat $expireat
+ r del not-expire
+ assert_equal {abcd deleted} [r hooks.is_key_removed not-expire]
+ assert_equal $expireat [r hooks.pexpireat not-expire]
+
+ # Test key evict
+ set used [expr {[s used_memory] - [s mem_not_counted_for_evict]}]
+ set limit [expr {$used+100*1024}]
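+ # leave only ~100kb of headroom above current usage, so the ~200kb SETBIT below pushes used_memory past maxmemory and forces an eviction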
+ set old_policy [lindex [r config get maxmemory-policy] 1]
+ r config set maxmemory $limit
+ # We set policy volatile-random, so only keys with ttl will be evicted
+ r config set maxmemory-policy volatile-random
+ r setex volatile-key 10000 x
+ # We use SETBIT here so we can create a big key and push used_memory
+ # above maxmemory; the next command will then evict volatile keys. We
+ # can't use SET, as SET needs a big input buffer, which would itself fail.
+ r setbit big-key 1600000 0 ;# this will consume 200kb
+ r getbit big-key 0
+ assert_equal {x evicted} [r hooks.is_key_removed volatile-key]
+ r config set maxmemory-policy $old_policy
+ r config set maxmemory 0
+ } {OK} {needs:debug}
+
+ test {Test flushdb hooks} {
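+ # the test suite runs against db 9, so FLUSHDB reports dbid 9, while FLUSHALL reports dbid -1 (all databases)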
+ r flushdb
+ assert_equal [r hooks.event_last flush-start] 9
+ assert_equal [r hooks.event_last flush-end] 9
+ r flushall
+ assert_equal [r hooks.event_last flush-start] -1
+ assert_equal [r hooks.event_last flush-end] -1
+ }
+
+ # replication related tests
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ start_server {} {
+ r module load $testmodule
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ $replica replicaof $master_host $master_port
+
+ wait_replica_online $master
+
+ test {Test master link up hook} {
+ assert_equal [r hooks.event_count masterlink-up] 1
+ assert_equal [r hooks.event_count masterlink-down] 0
+ }
+
+ test {Test role-replica hook} {
+ assert_equal [r hooks.event_count role-replica] 1
+ assert_equal [r hooks.event_count role-master] 0
+ assert_equal [r hooks.event_last role-replica] [s 0 master_host]
+ }
+
+ test {Test replica-online hook} {
+ assert_equal [r -1 hooks.event_count replica-online] 1
+ assert_equal [r -1 hooks.event_count replica-offline] 0
+ }
+
+ test {Test master link down hook} {
+ r client kill type master
+ assert_equal [r hooks.event_count masterlink-down] 1
+
+ wait_for_condition 50 100 {
+ [string match {*master_link_status:up*} [r info replication]]
+ } else {
+ fail "Replica didn't reconnect"
+ }
+
+ assert_equal [r hooks.event_count masterlink-down] 1
+ assert_equal [r hooks.event_count masterlink-up] 2
+ }
+
+ wait_for_condition 50 10 {
+ [string match {*master_link_status:up*} [r info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+
+ $replica replicaof no one
+
+ test {Test role-master hook} {
+ assert_equal [r hooks.event_count role-replica] 1
+ assert_equal [r hooks.event_count role-master] 1
+ assert_equal [r hooks.event_last role-master] {}
+ }
+
+ test {Test replica-offline hook} {
+ assert_equal [r -1 hooks.event_count replica-online] 2
+ assert_equal [r -1 hooks.event_count replica-offline] 2
+ }
+ # get the replica stdout, to be used by the next test
+ set replica_stdout [srv 0 stdout]
+ }
+
+ test {Test swapdb hooks} {
+ r swapdb 0 10
+ assert_equal [r hooks.event_last swapdb-first] 0
+ assert_equal [r hooks.event_last swapdb-second] 10
+ }
+
+ test {Test configchange hooks} {
+ r config set rdbcompression no
+ assert_equal [r hooks.event_last config-change-count] 1
+ assert_equal [r hooks.event_last config-change-first] rdbcompression
+ }
+
+ # look into the log file of the server that just exited
+ test {Test shutdown hook} {
+ assert_equal [string match {*module-event-shutdown*} [exec tail -5 < $replica_stdout]] 1
+ }
+ }
+
+ start_server {} {
+ test {OnLoad failure will handle un-registration} {
+ catch {r module load $testmodule noload}
+ r flushall
+ r ping
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/infotest.tcl b/tests/unit/moduleapi/infotest.tcl
new file mode 100644
index 0000000..ccd8c4e
--- /dev/null
+++ b/tests/unit/moduleapi/infotest.tcl
@@ -0,0 +1,131 @@
+set testmodule [file normalize tests/modules/infotest.so]
+
+# Return the value of the given INFO property
+proc field {info property} {
+ if {[regexp "\r\n$property:(.*?)\r\n" $info _ value]} {
+ set _ $value
+ }
+}
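+# e.g. [field $info infotest_dos] returns "2" when $info contains "infotest_dos:2"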
+
+start_server {tags {"modules"}} {
+ r module load $testmodule log-key 0
+
+ test {module reading info} {
+ # check string, integer and float fields
+ assert_equal [r info.gets replication role] "master"
+ assert_equal [r info.getc replication role] "master"
+ assert_equal [r info.geti stats expired_keys] 0
+ assert_equal [r info.getd stats expired_stale_perc] 0
+
+ # check signed and unsigned
+ assert_equal [r info.geti infotest infotest_global] -2
+ assert_equal [r info.getu infotest infotest_uglobal] -2
+
+ # the above are always 0, try module info that is non-zero
+ assert_equal [r info.geti infotest_italian infotest_due] 2
+ set tre [r info.getd infotest_italian infotest_tre]
+ assert {$tre > 3.2 && $tre < 3.4 }
+
+ # search using the wrong section
+ catch { [r info.gets badname redis_version] } e
+ assert_match {*not found*} $e
+
+ # check that section filter works
+ assert { [string match "*usec_per_call*" [r info.gets all cmdstat_info.gets] ] }
+ catch { [r info.gets default cmdstat_info.gets] } e
+ assert_match {*not found*} $e
+ }
+
+ test {module info all} {
+ set info [r info all]
+ # info all does not contain modules
+ assert { ![string match "*Spanish*" $info] }
+ assert { ![string match "*infotest_*" $info] }
+ assert { [string match "*used_memory*" $info] }
+ }
+
+ test {module info all infotest} {
+ set info [r info all infotest]
+ # info all infotest should contain both ALL and the module information
+ assert { [string match "*Spanish*" $info] }
+ assert { [string match "*infotest_*" $info] }
+ assert { [string match "*used_memory*" $info] }
+ }
+
+ test {module info everything} {
+ set info [r info everything]
+ # info everything contains all default sections, but not ones for crash report
+ assert { [string match "*infotest_global*" $info] }
+ assert { [string match "*Spanish*" $info] }
+ assert { [string match "*Italian*" $info] }
+ assert { [string match "*used_memory*" $info] }
+ assert { ![string match "*Klingon*" $info] }
+ field $info infotest_dos
+ } {2}
+
+ test {module info modules} {
+ set info [r info modules]
+ # info modules contains only module-provided sections
+ assert { [string match "*Spanish*" $info] }
+ assert { [string match "*infotest_global*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+ }
+
+ test {module info one module} {
+ set info [r info INFOtest] ;# test case insensitive compare
+ # a single module's info contains its sections, but not the default ones
+ assert { [string match "*Spanish*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+ field $info infotest_global
+ } {-2}
+
+ test {module info one section} {
+ set info [r info INFOtest_SpanisH] ;# test case insensitive compare
+ assert { ![string match "*used_memory*" $info] }
+ assert { ![string match "*Italian*" $info] }
+ assert { ![string match "*infotest_global*" $info] }
+ field $info infotest_uno
+ } {one}
+
+ test {module info dict} {
+ set info [r info infotest_keyspace]
+ set keyspace [field $info infotest_db0]
+ set keys [scan [regexp -inline {keys\=([\d]*)} $keyspace] keys=%d]
+ } {3}
+
+ test {module info unsafe fields} {
+ set info [r info infotest_unsafe]
+ assert_match {*infotest_unsafe_field:value=1*} $info
+ }
+
+ test {module info multiple sections without all, everything, default keywords} {
+ set info [r info replication INFOTEST]
+ assert { [string match "*Spanish*" $info] }
+ assert { ![string match "*used_memory*" $info] }
+ assert { [string match "*repl_offset*" $info] }
+ }
+
+ test {module info multiple sections with all keyword and modules} {
+ set info [r info all modules]
+ assert { [string match "*cluster*" $info] }
+ assert { [string match "*cmdstat_info*" $info] }
+ assert { [string match "*infotest_global*" $info] }
+ }
+
+ test {module info multiple sections with everything keyword} {
+ set info [r info replication everything cpu]
+ assert { [string match "*client_recent*" $info] }
+ assert { [string match "*cmdstat_info*" $info] }
+ assert { [string match "*Italian*" $info] }
+ # check that we didn't get the same info twice
+ assert { ![string match "*used_cpu_user_children*used_cpu_user_children*" $info] }
+ assert { ![string match "*Italian*Italian*" $info] }
+ field $info infotest_dos
+ } {2}
+
+ test "Unload the module - infotest" {
+ assert_equal {OK} [r module unload infotest]
+ }
+
+ # TODO: test crash report.
+}
diff --git a/tests/unit/moduleapi/infra.tcl b/tests/unit/moduleapi/infra.tcl
new file mode 100644
index 0000000..1140e5a
--- /dev/null
+++ b/tests/unit/moduleapi/infra.tcl
@@ -0,0 +1,25 @@
+set testmodule [file normalize tests/modules/infotest.so]
+
+test {modules config rewrite} {
+
+ start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ set modules [lmap x [r module list] {dict get $x name}]
+ assert_not_equal [lsearch $modules infotest] -1
+
+ r config rewrite
+ restart_server 0 true false
+
+ set modules [lmap x [r module list] {dict get $x name}]
+ assert_not_equal [lsearch $modules infotest] -1
+
+ assert_equal {OK} [r module unload infotest]
+
+ r config rewrite
+ restart_server 0 true false
+
+ set modules [lmap x [r module list] {dict get $x name}]
+ assert_equal [lsearch $modules infotest] -1
+ }
+}
diff --git a/tests/unit/moduleapi/keyspace_events.tcl b/tests/unit/moduleapi/keyspace_events.tcl
new file mode 100644
index 0000000..1323b12
--- /dev/null
+++ b/tests/unit/moduleapi/keyspace_events.tcl
@@ -0,0 +1,118 @@
+set testmodule [file normalize tests/modules/keyspace_events.so]
+
+tags "modules" {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+
+ test {Test loaded key space event} {
+ r set x 1
+ r hset y f v
+ r lpush z 1 2 3
+ r sadd p 1 2 3
+ r zadd t 1 f1 2 f2
+ r xadd s * f v
+ r debug reload
+ assert_equal {1 x} [r keyspace.is_key_loaded x]
+ assert_equal {1 y} [r keyspace.is_key_loaded y]
+ assert_equal {1 z} [r keyspace.is_key_loaded z]
+ assert_equal {1 p} [r keyspace.is_key_loaded p]
+ assert_equal {1 t} [r keyspace.is_key_loaded t]
+ assert_equal {1 s} [r keyspace.is_key_loaded s]
+ }
+
+ test {Nested multi due to RM_Call} {
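+ # the module commands increment x via RM_Call; the "multi" and "lua" keys
+ # are set by the module when it detects a MULTI or Lua context
+ # (inferred from the assertions below)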
+ r del multi
+ r del lua
+
+ r set x 1
+ r set x_copy 1
+ r keyspace.del_key_copy x
+ r keyspace.incr_case1 x
+ r keyspace.incr_case2 x
+ r keyspace.incr_case3 x
+ assert_equal {} [r get multi]
+ assert_equal {} [r get lua]
+ r get x
+ } {3}
+
+ test {Nested multi due to RM_Call, with client MULTI} {
+ r del multi
+ r del lua
+
+ r set x 1
+ r set x_copy 1
+ r multi
+ r keyspace.del_key_copy x
+ r keyspace.incr_case1 x
+ r keyspace.incr_case2 x
+ r keyspace.incr_case3 x
+ r exec
+ assert_equal {1} [r get multi]
+ assert_equal {} [r get lua]
+ r get x
+ } {3}
+
+ test {Nested multi due to RM_Call, with EVAL} {
+ r del multi
+ r del lua
+
+ r set x 1
+ r set x_copy 1
+ r eval {
+ redis.pcall('keyspace.del_key_copy', KEYS[1])
+ redis.pcall('keyspace.incr_case1', KEYS[1])
+ redis.pcall('keyspace.incr_case2', KEYS[1])
+ redis.pcall('keyspace.incr_case3', KEYS[1])
+ } 1 x
+ assert_equal {} [r get multi]
+ assert_equal {1} [r get lua]
+ r get x
+ } {3}
+
+ test {Test module key space event} {
+ r keyspace.notify x
+ assert_equal {1 x} [r keyspace.is_module_key_notified x]
+ }
+
+ test "Keyspace notifications: module events test" {
+ r config set notify-keyspace-events Kd
+ r del x
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r keyspace.notify x
+ assert_equal {pmessage * __keyspace@9__:x notify} [$rd1 read]
+ $rd1 close
+ }
+
+ test {Test expired key space event} {
+ set prev_expired [s expired_keys]
+ r set exp 1 PX 10
+ wait_for_condition 100 10 {
+ [s expired_keys] eq $prev_expired + 1
+ } else {
+ fail "key not expired"
+ }
+ assert_equal [r get testkeyspace:expired] 1
+ }
+
+ test "Unload the module - testkeyspace" {
+ assert_equal {OK} [r module unload testkeyspace]
+ }
+
+ test "Verify RM_StringDMA with expiration are not causing invalid memory access" {
+ assert_equal {OK} [r set x 1 EX 1]
+ }
+ }
+
+ start_server {} {
+ test {OnLoad failure will handle un-registration} {
+ catch {r module load $testmodule noload}
+ r set x 1
+ r hset y f v
+ r lpush z 1 2 3
+ r sadd p 1 2 3
+ r zadd t 1 f1 2 f2
+ r xadd s * f v
+ r ping
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/keyspecs.tcl b/tests/unit/moduleapi/keyspecs.tcl
new file mode 100644
index 0000000..9e68e97
--- /dev/null
+++ b/tests/unit/moduleapi/keyspecs.tcl
@@ -0,0 +1,160 @@
+set testmodule [file normalize tests/modules/keyspecs.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
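+ # COMMAND INFO reply layout (0-indexed): 2=flags, 3=first key, 4=last key,
+ # 5=key step, 8=key specs; the assertions below index into it accordingly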
+
+ test "Module key specs: No spec, only legacy triple" {
+ set reply [lindex [r command info kspec.none] 0]
+ # Verify (first, last, step) and not movablekeys
+ assert_equal [lindex $reply 2] {module}
+ assert_equal [lindex $reply 3] 1
+ assert_equal [lindex $reply 4] -1
+ assert_equal [lindex $reply 5] 2
+ # Verify key-spec auto-generated from the legacy triple
+ set keyspecs [lindex $reply 8]
+ assert_equal [llength $keyspecs] 1
+ assert_equal [lindex $keyspecs 0] {flags {RW access update} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey -1 keystep 2 limit 0}}}
+ assert_equal [r command getkeys kspec.none key1 val1 key2 val2] {key1 key2}
+ }
+
+ test "Module key specs: No spec, only legacy triple with getkeys-api" {
+ set reply [lindex [r command info kspec.nonewithgetkeys] 0]
+ # Verify (first, last, step) and movablekeys
+ assert_equal [lindex $reply 2] {module movablekeys}
+ assert_equal [lindex $reply 3] 1
+ assert_equal [lindex $reply 4] -1
+ assert_equal [lindex $reply 5] 2
+ # Verify key-spec auto-generated from the legacy triple
+ set keyspecs [lindex $reply 8]
+ assert_equal [llength $keyspecs] 1
+ assert_equal [lindex $keyspecs 0] {flags {RW access update variable_flags} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey -1 keystep 2 limit 0}}}
+ assert_equal [r command getkeys kspec.nonewithgetkeys key1 val1 key2 val2] {key1 key2}
+ }
+
+ test "Module key specs: Two ranges" {
+ set reply [lindex [r command info kspec.tworanges] 0]
+ # Verify (first, last, step) and not movablekeys
+ assert_equal [lindex $reply 2] {module}
+ assert_equal [lindex $reply 3] 1
+ assert_equal [lindex $reply 4] 2
+ assert_equal [lindex $reply 5] 1
+ # Verify key-specs
+ set keyspecs [lindex $reply 8]
+ assert_equal [lindex $keyspecs 0] {flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 1] {flags {RW update} begin_search {type index spec {index 2}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [r command getkeys kspec.tworanges foo bar baz quux] {foo bar}
+ }
+
+ test "Module key specs: Two ranges with gap" {
+ set reply [lindex [r command info kspec.tworangeswithgap] 0]
+ # Verify (first, last, step) and movablekeys
+ assert_equal [lindex $reply 2] {module movablekeys}
+ assert_equal [lindex $reply 3] 1
+ assert_equal [lindex $reply 4] 1
+ assert_equal [lindex $reply 5] 1
+ # Verify key-specs
+ set keyspecs [lindex $reply 8]
+ assert_equal [lindex $keyspecs 0] {flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 1] {flags {RW update} begin_search {type index spec {index 3}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [r command getkeys kspec.tworangeswithgap foo bar baz quux] {foo baz}
+ }
+
+ test "Module key specs: Keyword-only spec clears the legacy triple" {
+ set reply [lindex [r command info kspec.keyword] 0]
+ # Verify (first, last, step) and movablekeys
+ assert_equal [lindex $reply 2] {module movablekeys}
+ assert_equal [lindex $reply 3] 0
+ assert_equal [lindex $reply 4] 0
+ assert_equal [lindex $reply 5] 0
+ # Verify key-specs
+ set keyspecs [lindex $reply 8]
+ assert_equal [lindex $keyspecs 0] {flags {RO access} begin_search {type keyword spec {keyword KEYS startfrom 1}} find_keys {type range spec {lastkey -1 keystep 1 limit 0}}}
+ assert_equal [r command getkeys kspec.keyword foo KEYS bar baz] {bar baz}
+ }
+
+ test "Module key specs: Complex specs, case 1" {
+ set reply [lindex [r command info kspec.complex1] 0]
+ # Verify (first, last, step) and movablekeys
+ assert_equal [lindex $reply 2] {module movablekeys}
+ assert_equal [lindex $reply 3] 1
+ assert_equal [lindex $reply 4] 1
+ assert_equal [lindex $reply 5] 1
+ # Verify key-specs
+ set keyspecs [lindex $reply 8]
+ assert_equal [lindex $keyspecs 0] {flags RO begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 1] {flags {RW update} begin_search {type keyword spec {keyword STORE startfrom 2}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 2] {flags {RO access} begin_search {type keyword spec {keyword KEYS startfrom 2}} find_keys {type keynum spec {keynumidx 0 firstkey 1 keystep 1}}}
+ assert_equal [r command getkeys kspec.complex1 foo dummy KEYS 1 bar baz STORE quux] {foo quux bar}
+ }
+
+ test "Module key specs: Complex specs, case 2" {
+ set reply [lindex [r command info kspec.complex2] 0]
+ # Verify (first, last, step) and movablekeys
+ assert_equal [lindex $reply 2] {module movablekeys}
+ assert_equal [lindex $reply 3] 1
+ assert_equal [lindex $reply 4] 2
+ assert_equal [lindex $reply 5] 1
+ # Verify key-specs
+ set keyspecs [lindex $reply 8]
+ assert_equal [lindex $keyspecs 0] {flags {RW update} begin_search {type keyword spec {keyword STORE startfrom 5}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 1] {flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 2] {flags {RO access} begin_search {type index spec {index 2}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}
+ assert_equal [lindex $keyspecs 3] {flags {RW update} begin_search {type index spec {index 3}} find_keys {type keynum spec {keynumidx 0 firstkey 1 keystep 1}}}
+ assert_equal [lindex $keyspecs 4] {flags {RW update} begin_search {type keyword spec {keyword MOREKEYS startfrom 5}} find_keys {type range spec {lastkey -1 keystep 1 limit 0}}}
+ assert_equal [r command getkeys kspec.complex2 foo bar 2 baz quux banana STORE dst dummy MOREKEYS hey ho] {dst foo bar baz quux hey ho}
+ }
+
+ test "Module command list filtering" {
+ ;# Note: we piggyback on this tcl file to test the general command list filtering functionality
+ set reply [r command list filterby module keyspecs]
+ assert_equal [lsort $reply] {kspec.complex1 kspec.complex2 kspec.keyword kspec.none kspec.nonewithgetkeys kspec.tworanges kspec.tworangeswithgap}
+ assert_equal [r command getkeys kspec.complex2 foo bar 2 baz quux banana STORE dst dummy MOREKEYS hey ho] {dst foo bar baz quux hey ho}
+ }
+
+ test {COMMAND GETKEYSANDFLAGS correctly reports module key-spec without flags} {
+ r command getkeysandflags kspec.none key1 val1 key2 val2
+ } {{key1 {RW access update}} {key2 {RW access update}}}
+
+ test {COMMAND GETKEYSANDFLAGS correctly reports module key-spec with flags} {
+ r command getkeysandflags kspec.nonewithgetkeys key1 val1 key2 val2
+ } {{key1 {RO access}} {key2 {RO access}}}
+
+ test {COMMAND GETKEYSANDFLAGS correctly reports module key-spec flags} {
+ r command getkeysandflags kspec.keyword keys key1 key2 key3
+ } {{key1 {RO access}} {key2 {RO access}} {key3 {RO access}}}
+
+ # user that can only read from "read" keys, write to "write" keys, and read+write to "RW" keys
+ r ACL setuser testuser +@all %R~read* %W~write* %RW~rw*
+
+ test "Module key specs: No spec, only legacy triple - ACL" {
+ # legacy triple didn't provide flags, so they require both read and write
+ assert_equal "OK" [r ACL DRYRUN testuser kspec.none rw val1]
+ assert_match {*has no permissions to access the 'read' key*} [r ACL DRYRUN testuser kspec.none read val1]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN testuser kspec.none write val1]
+ }
+
+ test "Module key specs: tworanges - ACL" {
+ assert_equal "OK" [r ACL DRYRUN testuser kspec.tworanges read write]
+ assert_equal "OK" [r ACL DRYRUN testuser kspec.tworanges rw rw]
+ assert_match {*has no permissions to access the 'read' key*} [r ACL DRYRUN testuser kspec.tworanges rw read]
+ assert_match {*has no permissions to access the 'write' key*} [r ACL DRYRUN testuser kspec.tworanges write rw]
+ }
+
+ foreach cmd {kspec.none kspec.tworanges} {
+ test "$cmd command will not be marked with movablekeys" {
+ set info [lindex [r command info $cmd] 0]
+ assert_no_match {*movablekeys*} [lindex $info 2]
+ }
+ }
+
+ foreach cmd {kspec.keyword kspec.complex1 kspec.complex2 kspec.nonewithgetkeys} {
+ test "$cmd command is marked with movablekeys" {
+ set info [lindex [r command info $cmd] 0]
+ assert_match {*movablekeys*} [lindex $info 2]
+ }
+ }
+
+ test "Unload the module - keyspecs" {
+ assert_equal {OK} [r module unload keyspecs]
+ }
+}
diff --git a/tests/unit/moduleapi/list.tcl b/tests/unit/moduleapi/list.tcl
new file mode 100644
index 0000000..11f3b75
--- /dev/null
+++ b/tests/unit/moduleapi/list.tcl
@@ -0,0 +1,160 @@
+set testmodule [file normalize tests/modules/list.so]
+
+# Verify selected fields of a list.edit reply dict. argv holds the
+# key-value pairs to check; possible keys:
+# i -- the number of inserts
+# d -- the number of deletes
+# r -- the number of replaces
+# index -- the last index
+# entry -- the entry pointed to by index
+proc verify_list_edit_reply {reply argv} {
+ foreach {k v} $argv {
+ assert_equal [dict get $reply $k] $v
+ }
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Module list set, get, insert, delete} {
+ r del k
+ assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r list.set k 1 xyz}
+ r rpush k x
+ # insert, set, get
+ r list.insert k 0 foo
+ r list.insert k -1 bar
+ r list.set k 1 xyz
+ assert_equal {foo xyz bar} [r list.getall k]
+ assert_equal {foo} [r list.get k 0]
+ assert_equal {xyz} [r list.get k 1]
+ assert_equal {bar} [r list.get k 2]
+ assert_equal {bar} [r list.get k -1]
+ assert_equal {foo} [r list.get k -3]
+ assert_error {ERR index out*} {r list.get k -4}
+ assert_error {ERR index out*} {r list.get k 3}
+ # remove
+ assert_error {ERR index out*} {r list.delete k -4}
+ assert_error {ERR index out*} {r list.delete k 3}
+ r list.delete k 0
+ r list.delete k -1
+ assert_equal {xyz} [r list.getall k]
+ # removing the last element deletes the list
+ r list.delete k 0
+ assert_equal 0 [r exists k]
+ }
+
+ test {Module list iteration} {
+ r del k
+ r rpush k x y z
+ assert_equal {x y z} [r list.getall k]
+ assert_equal {z y x} [r list.getall k REVERSE]
+ }
+
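+ # list.edit interprets its op string left to right against the list:
+ # 'i' inserts the next argument, 'd' deletes the current entry, 'r'
+ # replaces it with the next argument, and 'k' keeps it and advances
+ # (inferred from the assertions below).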
+ test {Module list insert & delete} {
+ r del k
+ r rpush k x y z
+ verify_list_edit_reply [r list.edit k ikikdi foo bar baz] {i 3 index 5}
+ r list.getall k
+ } {foo x bar y baz}
+
+ test {Module list insert & delete, neg index} {
+ r del k
+ r rpush k x y z
+ verify_list_edit_reply [r list.edit k REVERSE ikikdi foo bar baz] {i 3 index -6}
+ r list.getall k
+ } {baz y bar z foo}
+
+ test {Module list set while iterating} {
+ r del k
+ r rpush k x y z
+ verify_list_edit_reply [r list.edit k rkr foo bar] {r 2 index 3}
+ r list.getall k
+ } {foo y bar}
+
+ test {Module list set while iterating, neg index} {
+ r del k
+ r rpush k x y z
+ verify_list_edit_reply [r list.edit k reverse rkr foo bar] {r 2 index -4}
+ r list.getall k
+ } {bar y foo}
+
+ test {Module list - encoding conversion while inserting} {
+ r config set list-max-listpack-size 4
+ r del k
+ r rpush k a b c d
+ assert_encoding listpack k
+
+ # Converts to quicklist after inserting.
+ r list.edit k dii foo bar
+ assert_encoding quicklist k
+ assert_equal [r list.getall k] {foo bar b c d}
+
+ # Converts to listpack after deleting three entries.
+ r list.edit k ddd e
+ assert_encoding listpack k
+ assert_equal [r list.getall k] {c d}
+ }
+
+ test {Module list - encoding conversion while replacing} {
+ r config set list-max-listpack-size -1
+ r del k
+ r rpush k x y z
+ assert_encoding listpack k
+
+ # Converts to quicklist after replacing.
+ set big [string repeat "x" 4096]
+ r list.edit k r $big
+ assert_encoding quicklist k
+ assert_equal [r list.getall k] "$big y z"
+
+ # Converts to listpack after deleting the big entry.
+ r list.edit k d
+ assert_encoding listpack k
+ assert_equal [r list.getall k] {y z}
+ }
+
+ test {Module list - list entry and index should be updated on deletion} {
+ set original_config [config_get_set list-max-listpack-size 1]
+
+ # delete from start (index 0)
+ r del l
+ r rpush l x y z
+ verify_list_edit_reply [r list.edit l dd] {d 2 index 0 entry z}
+ assert_equal [r list.getall l] {z}
+
+ # delete from start (index -3)
+ r del l
+ r rpush l x y z
+ verify_list_edit_reply [r list.edit l reverse kkd] {d 1 index -3}
+ assert_equal [r list.getall l] {y z}
+
+ # delete from tail (index 2)
+ r del l
+ r rpush l x y z
+ verify_list_edit_reply [r list.edit l kkd] {d 1 index 2}
+ assert_equal [r list.getall l] {x y}
+
+ # delete from tail (index -1)
+ r del l
+ r rpush l x y z
+ verify_list_edit_reply [r list.edit l reverse dd] {d 2 index -1 entry x}
+ assert_equal [r list.getall l] {x}
+
+ # delete from middle (index 1)
+ r del l
+ r rpush l x y z
+ verify_list_edit_reply [r list.edit l kdd] {d 2 index 1}
+ assert_equal [r list.getall l] {x}
+
+ # delete from middle (index -2)
+ r del l
+ r rpush l x y z
+ verify_list_edit_reply [r list.edit l reverse kdd] {d 2 index -2}
+ assert_equal [r list.getall l] {z}
+
+ config_set list-max-listpack-size $original_config
+ }
+
+ test "Unload the module - list" {
+ assert_equal {OK} [r module unload list]
+ }
+}
diff --git a/tests/unit/moduleapi/mallocsize.tcl b/tests/unit/moduleapi/mallocsize.tcl
new file mode 100644
index 0000000..359a7ae
--- /dev/null
+++ b/tests/unit/moduleapi/mallocsize.tcl
@@ -0,0 +1,21 @@
+set testmodule [file normalize tests/modules/mallocsize.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {MallocSize of raw bytes} {
+ assert_equal [r mallocsize.setraw key 40] {OK}
+ assert_morethan [r memory usage key] 40
+ }
+
+ test {MallocSize of string} {
+ assert_equal [r mallocsize.setstr key abcdefg] {OK}
+ assert_morethan [r memory usage key] 7 ;# Length of "abcdefg"
+ }
+
+ test {MallocSize of dict} {
+ assert_equal [r mallocsize.setdict key f1 v1 f2 v2] {OK}
+ assert_morethan [r memory usage key] 8 ;# Length of "f1v1f2v2"
+ }
+}
diff --git a/tests/unit/moduleapi/misc.tcl b/tests/unit/moduleapi/misc.tcl
new file mode 100644
index 0000000..cf20546
--- /dev/null
+++ b/tests/unit/moduleapi/misc.tcl
@@ -0,0 +1,555 @@
+set testmodule [file normalize tests/modules/misc.so]
+
+start_server {overrides {save {900 1}} tags {"modules"}} {
+ r module load $testmodule
+
+ test {test RM_Call} {
+ set info [r test.call_info commandstats]
+ # cmdstat is not in a default section, so we also test an argument was passed
+ assert { [string match "*cmdstat_module*" $info] }
+ }
+
+ test {test RM_Call args array} {
+ set info [r test.call_generic info commandstats]
+ # cmdstat is not in a default section, so we also test an argument was passed
+ assert { [string match "*cmdstat_module*" $info] }
+ }
+
+ test {test RM_Call recursive} {
+ set info [r test.call_generic test.call_generic info commandstats]
+ assert { [string match "*cmdstat_module*" $info] }
+ }
+
+ test {test redis version} {
+ set version [s redis_version]
+ assert_equal $version [r test.redisversion]
+ }
+
+ test {test long double conversions} {
+ set ld [r test.ld_conversion]
+ assert {[string match $ld "0.00000000000000001"]}
+ }
+
+ test {test unsigned long long conversions} {
+ set ret [r test.ull_conversion]
+ assert {[string match $ret "ok"]}
+ }
+
+ test {test module db commands} {
+ r set x foo
+ set key [r test.randomkey]
+ assert_equal $key "x"
+ assert_equal [r test.dbsize] 1
+ r test.flushall
+ assert_equal [r test.dbsize] 0
+ }
+
+ test {test RedisModule_ResetDataset do not reset functions} {
+ r function load {#!lua name=lib
+ redis.register_function('test', function() return 1 end)
+ }
+ assert_equal [r function list] {{library_name lib engine LUA functions {{name test description {} flags {}}}}}
+ r test.flushall
+ assert_equal [r function list] {{library_name lib engine LUA functions {{name test description {} flags {}}}}}
+ r function flush
+ }
+
+ test {test module keyexists} {
+ r set x foo
+ assert_equal 1 [r test.keyexists x]
+ r del x
+ assert_equal 0 [r test.keyexists x]
+ }
+
+ test {test module lru api} {
+ r config set maxmemory-policy allkeys-lru
+ r set x foo
+ set lru [r test.getlru x]
+ assert { $lru <= 1000 }
+ set was_set [r test.setlru x 100000]
+ assert { $was_set == 1 }
+ set idle [r object idletime x]
+ assert { $idle >= 100 }
+ set lru [r test.getlru x]
+ assert { $lru >= 100000 }
+ r config set maxmemory-policy allkeys-lfu
+ set lru [r test.getlru x]
+ assert { $lru == -1 }
+ set was_set [r test.setlru x 100000]
+ assert { $was_set == 0 }
+ }
+ r config set maxmemory-policy allkeys-lru
+
+ test {test module lfu api} {
+ r config set maxmemory-policy allkeys-lfu
+ r set x foo
+ set lfu [r test.getlfu x]
+ assert { $lfu >= 1 }
+ set was_set [r test.setlfu x 100]
+ assert { $was_set == 1 }
+ set freq [r object freq x]
+ assert { $freq <= 100 }
+ set lfu [r test.getlfu x]
+ assert { $lfu <= 100 }
+ r config set maxmemory-policy allkeys-lru
+ set lfu [r test.getlfu x]
+ assert { $lfu == -1 }
+ set was_set [r test.setlfu x 100]
+ assert { $was_set == 0 }
+ }
+
+ test {test module clientinfo api} {
+ # Test basic sanity and SSL flag
+ set info [r test.clientinfo]
+ set ssl_flag [expr $::tls ? {"ssl:"} : {":"}]
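+ # the module reports client flags as colon-separated fields; under TLS the first field is "ssl" (hence the assertions below)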
+
+ assert { [dict get $info db] == 9 }
+ assert { [dict get $info flags] == "${ssl_flag}::::" }
+
+ # Test MULTI flag
+ r multi
+ r test.clientinfo
+ set info [lindex [r exec] 0]
+ assert { [dict get $info flags] == "${ssl_flag}::::multi" }
+
+ # Test TRACKING flag
+ r client tracking on
+ set info [r test.clientinfo]
+ assert { [dict get $info flags] == "${ssl_flag}::tracking::" }
+ r CLIENT TRACKING off
+ }
+
+ test {tracking with rm_call sanity} {
+ set rd_trk [redis_client]
+ $rd_trk HELLO 3
+ $rd_trk CLIENT TRACKING on
+ r MSET key1{t} 1 key2{t} 1
+
+ # GET triggers tracking, SET does not
+ $rd_trk test.rm_call GET key1{t}
+ $rd_trk test.rm_call SET key2{t} 2
+ r MSET key1{t} 2 key2{t} 2
+ assert_equal {invalidate key1{t}} [$rd_trk read]
+ assert_equal "PONG" [$rd_trk ping]
+ $rd_trk close
+ }
+
+ test {tracking with rm_call with script} {
+ set rd_trk [redis_client]
+ $rd_trk HELLO 3
+ $rd_trk CLIENT TRACKING on
+ r MSET key1{t} 1 key2{t} 1
+
+ # GET triggers tracking, SET does not
+ $rd_trk test.rm_call EVAL "redis.call('get', 'key1{t}')" 2 key1{t} key2{t}
+ r MSET key1{t} 2 key2{t} 2
+ assert_equal {invalidate key1{t}} [$rd_trk read]
+ assert_equal "PONG" [$rd_trk ping]
+ $rd_trk close
+ }
+
+ test {publish to self inside rm_call} {
+ r hello 3
+ r subscribe foo
+
+ # published message comes after the response of the command that issued it.
+ assert_equal [r test.rm_call publish foo bar] {1}
+ assert_equal [r read] {message foo bar}
+
+ r unsubscribe foo
+ r hello 2
+ set _ ""
+ } {} {resp3}
+
+ test {test module get/set client name by id api} {
+ catch { r test.getname } e
+ assert_equal "-ERR No name" $e
+ r client setname nobody
+ catch { r test.setname "name with spaces" } e
+ assert_match "*Invalid argument*" $e
+ assert_equal nobody [r client getname]
+ assert_equal nobody [r test.getname]
+ r test.setname somebody
+ assert_equal somebody [r client getname]
+ }
+
+ test {test module getclientcert api} {
+ set cert [r test.getclientcert]
+
+ if {$::tls} {
+ assert {$cert != ""}
+ } else {
+ assert {$cert == ""}
+ }
+ }
+
+ test {test detached thread safe context} {
+ r test.log_tsctx "info" "Test message"
+ verify_log_message 0 "*<misc> Test message*" 0
+ }
+
+ test {test RM_Call CLIENT INFO} {
+ assert_match "*fd=-1*" [r test.call_generic client info]
+ }
+
+ test {Unsafe command names are sanitized in INFO output} {
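+ # the ':' in the command name is mapped to '_' in the commandstats key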
+ r test.weird:cmd
+ set info [r info commandstats]
+ assert_match {*cmdstat_test.weird_cmd:calls=1*} $info
+ }
+
+ test {test monotonic time} {
+ set x [r test.monotonic_time]
+ assert { [r test.monotonic_time] >= $x }
+ }
+
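+ # test.rm_call_flags forwards single-letter flags to RM_Call; the tests
+ # below exercise M (respect OOM state), W (reject writes), S (script mode),
+ # C (check ACLs) and D (dry run), as inferred from the assertions that follow.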
+ test {rm_call OOM} {
+ r config set maxmemory 1
+ r config set maxmemory-policy volatile-lru
+
+ # sanity test plain call
+ assert_equal {OK} [
+ r test.rm_call set x 1
+ ]
+
+ # add the M flag
+ assert_error {OOM *} {
+ r test.rm_call_flags M set x 1
+ }
+
+ # test a non deny-oom command
+ assert_equal {1} [
+ r test.rm_call_flags M get x
+ ]
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test {rm_call clear OOM} {
+ r config set maxmemory 1
+
+ # verify rm_call fails with OOM
+ assert_error {OOM *} {
+ r test.rm_call_flags M set x 1
+ }
+
+ # clear OOM state
+ r config set maxmemory 0
+
+ # test set command is allowed
+ r test.rm_call_flags M set x 1
+ } {OK} {needs:config-maxmemory}
+
+ test {rm_call OOM Eval} {
+ r config set maxmemory 1
+ r config set maxmemory-policy volatile-lru
+
+ # use the M flag without allow-oom shebang flag
+ assert_error {OOM *} {
+ r test.rm_call_flags M eval {#!lua
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ }
+
+ # add the M flag with allow-oom shebang flag
+ assert_equal {1} [
+ r test.rm_call_flags M eval {#!lua flags=allow-oom
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ ]
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test {rm_call write flag} {
+ # add the W flag
+ assert_error {ERR Write command 'set' was called while write is not allowed.} {
+ r test.rm_call_flags W set x 1
+ }
+
+ # test a non-write command
+ r test.rm_call_flags W get x
+ } {1}
+
+ test {rm_call EVAL} {
+ r test.rm_call eval {
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+
+ assert_error {ERR Write commands are not allowed from read-only scripts.*} {
+ r test.rm_call eval {#!lua flags=no-writes
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ }
+ }
+
+ # Note: each script is unique, to check that flags are extracted correctly
+ test {rm_call EVAL - OOM - with M flag} {
+ r config set maxmemory 1
+
+ # script without shebang, but uses SET, so fails
+ assert_error {*OOM command not allowed when used memory > 'maxmemory'*} {
+ r test.rm_call_flags M eval {
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ }
+
+ # script with an allow-oom flag, succeeds despite using SET
+ r test.rm_call_flags M eval {#!lua flags=allow-oom
+ redis.call('set','x', 1)
+ return 2
+ } 1 x
+
+ # script with no-writes flag, implies allow-oom, succeeds
+ r test.rm_call_flags M eval {#!lua flags=no-writes
+ redis.call('get','x')
+ return 2
+ } 1 x
+
+ # script with shebang using default flags, so fails regardless of using only GET
+ assert_error {*OOM command not allowed when used memory > 'maxmemory'*} {
+ r test.rm_call_flags M eval {#!lua
+ redis.call('get','x')
+ return 3
+ } 1 x
+ }
+
+ # script without shebang, but uses GET, so succeeds
+ r test.rm_call_flags M eval {
+ redis.call('get','x')
+ return 4
+ } 1 x
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ # Without the M flag, all RM_Call script invocations succeed even in OOM state
+ test {rm_call EVAL - OOM - without M flag} {
+ r config set maxmemory 1
+
+ # no shebang at all
+ r test.rm_call eval {
+ redis.call('set','x',1)
+ return 6
+ } 1 x
+
+ # Shebang without flags
+ r test.rm_call eval {#!lua
+ redis.call('set','x', 1)
+ return 7
+ } 1 x
+
+ # with allow-oom flag
+ r test.rm_call eval {#!lua flags=allow-oom
+ redis.call('set','x', 1)
+ return 8
+ } 1 x
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test "not enough good replicas" {
+ r set x "some value"
+ r config set min-replicas-to-write 1
+
+ # rm_call in script mode
+ assert_error {NOREPLICAS *} {r test.rm_call_flags S set x s}
+
+ assert_equal [
+ r test.rm_call eval {#!lua flags=no-writes
+ return redis.call('get','x')
+ } 1 x
+ ] "some value"
+
+ assert_equal [
+ r test.rm_call eval {
+ return redis.call('get','x')
+ } 1 x
+ ] "some value"
+
+ assert_error {NOREPLICAS *} {
+ r test.rm_call eval {#!lua
+ return redis.call('get','x')
+ } 1 x
+ }
+
+ assert_error {NOREPLICAS *} {
+ r test.rm_call eval {
+ return redis.call('set','x', 1)
+ } 1 x
+ }
+
+ r config set min-replicas-to-write 0
+ }
+
+ test {rm_call EVAL - read-only replica} {
+ r replicaof 127.0.0.1 1
+
+ # rm_call in script mode
+ assert_error {READONLY *} {r test.rm_call_flags S set x 1}
+
+ assert_error {READONLY You can't write against a read only replica. script*} {
+ r test.rm_call eval {
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ }
+
+ r test.rm_call eval {#!lua flags=no-writes
+ redis.call('get','x')
+ return 2
+ } 1 x
+
+ assert_error {READONLY Can not run script with write flag on readonly replica*} {
+ r test.rm_call eval {#!lua
+ redis.call('get','x')
+ return 3
+ } 1 x
+ }
+
+ r test.rm_call eval {
+ redis.call('get','x')
+ return 4
+ } 1 x
+
+ r replicaof no one
+ } {OK} {needs:config-maxmemory}
+
+ test {rm_call EVAL - stale replica} {
+ r replicaof 127.0.0.1 1
+ r config set replica-serve-stale-data no
+
+ # rm_call in script mode
+ assert_error {MASTERDOWN *} {
+ r test.rm_call_flags S get x
+ }
+
+ assert_error {MASTERDOWN *} {
+ r test.rm_call eval {#!lua flags=no-writes
+ redis.call('get','x')
+ return 2
+ } 1 x
+ }
+
+ assert_error {MASTERDOWN *} {
+ r test.rm_call eval {
+ redis.call('get','x')
+ return 4
+ } 1 x
+ }
+
+ r replicaof no one
+ r config set replica-serve-stale-data yes
+ } {OK} {needs:config-maxmemory}
+
+ test "rm_call EVAL - failed bgsave prevents writes" {
+ r config set rdb-key-save-delay 10000000
+ populate 1000
+ r set x x
+ r bgsave
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+ waitForBgsave r
+
+ # make sure a read command succeeds
+ assert_equal [r get x] x
+
+ # make sure a write command fails
+ assert_error {MISCONF *} {r set x y}
+
+ # rm_call in script mode
+ assert_error {MISCONF *} {r test.rm_call_flags S set x 1}
+
+ # repeat with script
+ assert_error {MISCONF *} {r test.rm_call eval {
+ return redis.call('set','x',1)
+ } 1 x
+ }
+ assert_equal {x} [r test.rm_call eval {
+ return redis.call('get','x')
+ } 1 x
+ ]
+
+ # again with script using shebang
+ assert_error {MISCONF *} {r test.rm_call eval {#!lua
+ return redis.call('set','x',1)
+ } 1 x
+ }
+ assert_equal {x} [r test.rm_call eval {#!lua flags=no-writes
+ return redis.call('get','x')
+ } 1 x
+ ]
+
+ r config set rdb-key-save-delay 0
+ r bgsave
+ waitForBgsave r
+
+ # server is writable again
+ r set x y
+ } {OK}
+}
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {test Dry Run - OK OOM/ACL} {
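+ # D = dry-run: the command passes ACL/OOM validation but is not executed
+ # (the module reports the NULL reply), so x keeps its value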
+ set x 5
+ r set x $x
+ catch {r test.rm_call_flags DMC set x 10} e
+ assert_match {*NULL reply returned*} $e
+ assert_equal [r get x] 5
+ }
+
+ test {test Dry Run - Fail OOM} {
+ set x 5
+ r set x $x
+ r config set maxmemory 1
+ catch {r test.rm_call_flags DM set x 10} e
+ assert_match {*OOM*} $e
+ assert_equal [r get x] $x
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test {test Dry Run - Fail ACL} {
+ set x 5
+ r set x $x
+ # revoke all key permissions from the default user (the dryrun command itself stays allowed)
+ r acl setuser default resetkeys
+
+ catch {r test.rm_call_flags DC set x 10} e
+ assert_match {*NOPERM No permissions to access a key*} $e
+ r acl setuser default +@all ~*
+ assert_equal [r get x] $x
+ }
+
+ test {test silent open key} {
+ r debug set-active-expire 0
+ r test.clear_n_events
+ r set x 1 PX 10
+ after 1000
+ # now that the key has expired, open it silently and make sure no events were fired.
+ assert_error {key not found} {r test.silent_open_key x}
+ assert_equal {0} [r test.get_n_events]
+ }
+
+if {[string match {*jemalloc*} [s mem_allocator]]} {
+ test {test RM_Call with large arg for SET command} {
+ # set a big value to trigger increasing the query buf
+ r set foo [string repeat A 100000]
+ # set a smaller value, but one > PROTO_MBULK_BIG_ARG (32*1024), so Redis will try to store the query buf itself in the DB.
+ r test.call_generic set bar [string repeat A 33000]
+ # assert the value was trimmed
+ assert {[r memory usage bar] < 42000}; # 42K to account for Jemalloc's additional memory overhead.
+ }
+} ;# if jemalloc
+
+ test "Unload the module - misc" {
+ assert_equal {OK} [r module unload misc]
+ }
+}
diff --git a/tests/unit/moduleapi/moduleauth.tcl b/tests/unit/moduleapi/moduleauth.tcl
new file mode 100644
index 0000000..82f42f5
--- /dev/null
+++ b/tests/unit/moduleapi/moduleauth.tcl
@@ -0,0 +1,405 @@
+set testmodule [file normalize tests/modules/auth.so]
+set testmoduletwo [file normalize tests/modules/moduleauthtwo.so]
+set miscmodule [file normalize tests/modules/misc.so]
+
+proc cmdstat {cmd} {
+ return [cmdrstat $cmd r]
+}
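+# e.g. [cmdstat auth] returns the cmdstat_auth line from INFO commandstats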
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+ r module load $testmoduletwo
+
+ set hello2_response [r HELLO 2]
+ set hello3_response [r HELLO 3]
+
+ test {test registering module auth callbacks} {
+ assert_equal {OK} [r testmoduleone.rm_register_blocking_auth_cb]
+ assert_equal {OK} [r testmoduletwo.rm_register_auth_cb]
+ assert_equal {OK} [r testmoduleone.rm_register_auth_cb]
+ }
+
+ test {test module AUTH for non existing / disabled users} {
+ r config resetstat
+ # Validate that an error is thrown for non existing users.
+ assert_error {*WRONGPASS*} {r AUTH foo pwd}
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ # Validate that an error is thrown for disabled users.
+ r acl setuser foo >pwd off ~* &* +@all
+ assert_error {*WRONGPASS*} {r AUTH foo pwd}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=2} [cmdstat auth]
+ }
+
+ test {test non blocking module AUTH} {
+ r config resetstat
+ # Test for a fixed password user
+ r acl setuser foo >pwd on ~* &* +@all
+ assert_equal {OK} [r AUTH foo allow]
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo deny}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
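+ # "nomatch" is not a password the module callbacks handle, so they skip and normal password auth runs (failing with WRONGPASS)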
+ assert_error {*WRONGPASS*} {r AUTH foo nomatch}
+ assert_match {*calls=3,*,rejected_calls=0,failed_calls=2} [cmdstat auth]
+ assert_equal {OK} [r AUTH foo pwd]
+ # Test for No Pass user
+ r acl setuser foo on ~* &* +@all nopass
+ assert_equal {OK} [r AUTH foo allow]
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo deny}
+ assert_match {*calls=6,*,rejected_calls=0,failed_calls=3} [cmdstat auth]
+ assert_equal {OK} [r AUTH foo nomatch]
+
+ # Validate that the Module added an ACL Log entry.
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry username] eq {foo}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry reason] eq {auth}}
+ assert {[dict get $entry object] eq {Module Auth}}
+ assert_match {*cmd=auth*} [dict get $entry client-info]
+ r ACL LOG RESET
+ }
+
+ test {test non blocking module HELLO AUTH} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ # Validate proto 2 and 3 in case of success
+ assert_equal $hello2_response [r HELLO 2 AUTH foo pwd]
+ assert_equal $hello2_response [r HELLO 2 AUTH foo allow]
+ assert_equal $hello3_response [r HELLO 3 AUTH foo pwd]
+ assert_equal $hello3_response [r HELLO 3 AUTH foo allow]
+ # Validate denying AUTH for the HELLO cmd
+ assert_error {*Auth denied by Misc Module*} {r HELLO 2 AUTH foo deny}
+ assert_match {*calls=5,*,rejected_calls=0,failed_calls=1} [cmdstat hello]
+ assert_error {*WRONGPASS*} {r HELLO 2 AUTH foo nomatch}
+ assert_match {*calls=6,*,rejected_calls=0,failed_calls=2} [cmdstat hello]
+ assert_error {*Auth denied by Misc Module*} {r HELLO 3 AUTH foo deny}
+ assert_match {*calls=7,*,rejected_calls=0,failed_calls=3} [cmdstat hello]
+ assert_error {*WRONGPASS*} {r HELLO 3 AUTH foo nomatch}
+ assert_match {*calls=8,*,rejected_calls=0,failed_calls=4} [cmdstat hello]
+
+ # Validate that the Module added an ACL Log entry.
+ set entry [lindex [r ACL LOG] 1]
+ assert {[dict get $entry username] eq {foo}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry reason] eq {auth}}
+ assert {[dict get $entry object] eq {Module Auth}}
+ assert_match {*cmd=hello*} [dict get $entry client-info]
+ r ACL LOG RESET
+ }
+
+ test {test non blocking module HELLO AUTH SETNAME} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ # Validate clientname is set on success
+ assert_equal $hello2_response [r HELLO 2 AUTH foo pwd setname client1]
+ assert {[r client getname] eq {client1}}
+ assert_equal $hello2_response [r HELLO 2 AUTH foo allow setname client2]
+ assert {[r client getname] eq {client2}}
+ # Validate clientname is not updated on failure
+ r client setname client0
+ assert_error {*Auth denied by Misc Module*} {r HELLO 2 AUTH foo deny setname client1}
+ assert {[r client getname] eq {client0}}
+ assert_match {*calls=3,*,rejected_calls=0,failed_calls=1} [cmdstat hello]
+ assert_error {*WRONGPASS*} {r HELLO 2 AUTH foo nomatch setname client2}
+ assert {[r client getname] eq {client0}}
+ assert_match {*calls=4,*,rejected_calls=0,failed_calls=2} [cmdstat hello]
+ }
+
+ test {test blocking module AUTH} {
+ r config resetstat
+ # Test for a fixed password user
+ r acl setuser foo >pwd on ~* &* +@all
+ assert_equal {OK} [r AUTH foo block_allow]
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo block_deny}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ assert_error {*WRONGPASS*} {r AUTH foo nomatch}
+ assert_match {*calls=3,*,rejected_calls=0,failed_calls=2} [cmdstat auth]
+ assert_equal {OK} [r AUTH foo pwd]
+ # Test for No Pass user
+ r acl setuser foo on ~* &* +@all nopass
+ assert_equal {OK} [r AUTH foo block_allow]
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo block_deny}
+ assert_match {*calls=6,*,rejected_calls=0,failed_calls=3} [cmdstat auth]
+ assert_equal {OK} [r AUTH foo nomatch]
+ # Validate that every Blocking AUTH command took at least 500000 usec.
+ set stats [cmdstat auth]
+ regexp "usec_per_call=(\[0-9]{1,})\.*," $stats all usec_per_call
+ assert {$usec_per_call >= 500000}
+
+ # Validate that the Module added an ACL Log entry.
+ set entry [lindex [r ACL LOG] 0]
+ assert {[dict get $entry username] eq {foo}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry reason] eq {auth}}
+ assert {[dict get $entry object] eq {Module Auth}}
+ assert_match {*cmd=auth*} [dict get $entry client-info]
+ r ACL LOG RESET
+ }
+
+ test {test blocking module HELLO AUTH} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ # validate proto 2 and 3 in case of success
+ assert_equal $hello2_response [r HELLO 2 AUTH foo pwd]
+ assert_equal $hello2_response [r HELLO 2 AUTH foo block_allow]
+ assert_equal $hello3_response [r HELLO 3 AUTH foo pwd]
+ assert_equal $hello3_response [r HELLO 3 AUTH foo block_allow]
+ # validate denying AUTH for the HELLO cmd
+ assert_error {*Auth denied by Misc Module*} {r HELLO 2 AUTH foo block_deny}
+ assert_match {*calls=5,*,rejected_calls=0,failed_calls=1} [cmdstat hello]
+ assert_error {*WRONGPASS*} {r HELLO 2 AUTH foo nomatch}
+ assert_match {*calls=6,*,rejected_calls=0,failed_calls=2} [cmdstat hello]
+ assert_error {*Auth denied by Misc Module*} {r HELLO 3 AUTH foo block_deny}
+ assert_match {*calls=7,*,rejected_calls=0,failed_calls=3} [cmdstat hello]
+ assert_error {*WRONGPASS*} {r HELLO 3 AUTH foo nomatch}
+ assert_match {*calls=8,*,rejected_calls=0,failed_calls=4} [cmdstat hello]
+ # Validate that every HELLO AUTH command took at least 500000 usec.
+ set stats [cmdstat hello]
+ regexp "usec_per_call=(\[0-9]{1,})\.*," $stats all usec_per_call
+ assert {$usec_per_call >= 500000}
+
+ # Validate that the Module added an ACL Log entry.
+ set entry [lindex [r ACL LOG] 1]
+ assert {[dict get $entry username] eq {foo}}
+ assert {[dict get $entry context] eq {module}}
+ assert {[dict get $entry reason] eq {auth}}
+ assert {[dict get $entry object] eq {Module Auth}}
+ assert_match {*cmd=hello*} [dict get $entry client-info]
+ r ACL LOG RESET
+ }
+
+ test {test blocking module HELLO AUTH SETNAME} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ # Validate clientname is set on success
+ assert_equal $hello2_response [r HELLO 2 AUTH foo pwd setname client1]
+ assert {[r client getname] eq {client1}}
+ assert_equal $hello2_response [r HELLO 2 AUTH foo block_allow setname client2]
+ assert {[r client getname] eq {client2}}
+ # Validate clientname is not updated on failure
+ r client setname client0
+ assert_error {*Auth denied by Misc Module*} {r HELLO 2 AUTH foo block_deny setname client1}
+ assert {[r client getname] eq {client0}}
+ assert_match {*calls=3,*,rejected_calls=0,failed_calls=1} [cmdstat hello]
+ assert_error {*WRONGPASS*} {r HELLO 2 AUTH foo nomatch setname client2}
+ assert {[r client getname] eq {client0}}
+ assert_match {*calls=4,*,rejected_calls=0,failed_calls=2} [cmdstat hello]
+ # Validate that every HELLO AUTH SETNAME command took at least 500000 usec.
+ set stats [cmdstat hello]
+ regexp "usec_per_call=(\[0-9]{1,})\.*," $stats all usec_per_call
+ assert {$usec_per_call >= 500000}
+ }
+
+ test {test AUTH after registering multiple module auth callbacks} {
+ r config resetstat
+
+ # Register two more callbacks from the same module.
+ assert_equal {OK} [r testmoduleone.rm_register_blocking_auth_cb]
+ assert_equal {OK} [r testmoduleone.rm_register_auth_cb]
+
+ # Register another module auth callback from the second module.
+ assert_equal {OK} [r testmoduletwo.rm_register_auth_cb]
+
+ r acl setuser foo >pwd on ~* &* +@all
+
+ # Case 1 - Non Blocking Success
+ assert_equal {OK} [r AUTH foo allow]
+
+ # Case 2 - Non Blocking Deny
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo deny}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+
+ r config resetstat
+
+ # Case 3 - Blocking Success
+ assert_equal {OK} [r AUTH foo block_allow]
+
+ # Case 4 - Blocking Deny
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo block_deny}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+
+ # Validate that every Blocking AUTH command took at least 500000 usec.
+ set stats [cmdstat auth]
+ regexp "usec_per_call=(\[0-9]{1,})\.*," $stats all usec_per_call
+ assert {$usec_per_call >= 500000}
+
+ r config resetstat
+
+ # Case 5 - Non Blocking Success via the second module.
+ assert_equal {OK} [r AUTH foo allow_two]
+
+ # Case 6 - Non Blocking Deny via the second module.
+ assert_error {*Auth denied by Misc Module*} {r AUTH foo deny_two}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+
+ r config resetstat
+
+ # Case 7 - All four auth callbacks "Skip" by not explicitly allowing or denying.
+ assert_error {*WRONGPASS*} {r AUTH foo nomatch}
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ assert_equal {OK} [r AUTH foo pwd]
+
+ # Because we had to attempt all 4 callbacks, validate that the AUTH command took at least
+ # 1000000 usec (each blocking callback takes 500000 usec).
+ set stats [cmdstat auth]
+ regexp "usec_per_call=(\[0-9]{1,})\.*," $stats all usec_per_call
+ assert {$usec_per_call >= 1000000}
+ }
+
+ test {module auth during blocking module auth} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ set rd [redis_deferring_client]
+ set rd_two [redis_deferring_client]
+
+ # Attempt blocking module auth. While this is ongoing, attempt non blocking module auth from
+ # moduleone/moduletwo and start another blocking module auth from another deferring client.
+ $rd AUTH foo block_allow
+ wait_for_blocked_clients_count 1
+ assert_equal {OK} [r AUTH foo allow]
+ assert_equal {OK} [r AUTH foo allow_two]
+ # Validate that the non blocking module auth cmds finished before any blocking module auth.
+ set info_clients [r info clients]
+ assert_match "*blocked_clients:1*" $info_clients
+ $rd_two AUTH foo block_allow
+
+ # Validate that all of the AUTH commands succeeded.
+ wait_for_blocked_clients_count 0 500 10
+ $rd flush
+ assert_equal [$rd read] "OK"
+ $rd_two flush
+ assert_equal [$rd_two read] "OK"
+ assert_match {*calls=4,*,rejected_calls=0,failed_calls=0} [cmdstat auth]
+ }
+
+ test {module auth inside MULTI EXEC} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+
+ # Validate that non blocking module auth inside MULTI succeeds.
+ r multi
+ r AUTH foo allow
+ assert_equal {OK} [r exec]
+
+ # Validate that blocking module auth inside MULTI throws an err.
+ r multi
+ r AUTH foo block_allow
+ assert_error {*ERR Blocking module command called from transaction*} {r exec}
+ assert_match {*calls=2,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ }
+
+ test {Disabling Redis User during blocking module auth} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ set rd [redis_deferring_client]
+
+ # Attempt blocking module auth and disable the Redis user while module auth is in progress.
+ $rd AUTH foo pwd
+ wait_for_blocked_clients_count 1
+ r acl setuser foo >pwd off ~* &* +@all
+
+ # Validate that module auth failed.
+ wait_for_blocked_clients_count 0 500 10
+ $rd flush
+ assert_error {*WRONGPASS*} { $rd read }
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ }
+
+ test {Killing a client in the middle of blocking module auth} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+ set rd [redis_deferring_client]
+ $rd client id
+ set cid [$rd read]
+
+ # Attempt blocking module auth command on client `cid` and kill the client while module auth
+ # is in progress.
+ $rd AUTH foo pwd
+ wait_for_blocked_clients_count 1
+ r client kill id $cid
+
+ # Validate that the blocked client count goes to 0 and no AUTH command is tracked.
+ wait_for_blocked_clients_count 0 500 10
+ $rd flush
+ assert_error {*I/O error reading reply*} { $rd read }
+ assert_match {} [cmdstat auth]
+ }
+
+ test {test RM_AbortBlock Module API during blocking module auth} {
+ r config resetstat
+ r acl setuser foo >pwd on ~* &* +@all
+
+ # Attempt module auth. With "block_abort" as the password, the "testacl.so" module
+ # blocks the client and uses the RM_AbortBlock API. This should result in module auth
+ # failing and the client being unblocked with the default AUTH err message.
+ assert_error {*WRONGPASS*} {r AUTH foo block_abort}
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat auth]
+ }
+
+ test {test RM_RegisterAuthCallback Module API during blocking module auth} {
+ r config resetstat
+ r acl setuser foo >defaultpwd on ~* &* +@all
+ set rd [redis_deferring_client]
+
+ # Start the module auth attempt with the standard Redis auth password for the user. This
+ # will result in all module auth cbs attempted and then standard Redis auth will be tried.
+ $rd AUTH foo defaultpwd
+ wait_for_blocked_clients_count 1
+
+ # Validate that we allow modules to register module auth cbs while module auth is already
+ # in progress.
+ assert_equal {OK} [r testmoduleone.rm_register_blocking_auth_cb]
+ assert_equal {OK} [r testmoduletwo.rm_register_auth_cb]
+
+ # Validate that blocking module auth succeeds.
+ wait_for_blocked_clients_count 0 500 10
+ $rd flush
+ assert_equal [$rd read] "OK"
+ set stats [cmdstat auth]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} $stats
+
+ # Validate that even the new blocking module auth cb which was registered in the middle of
+ # blocking module auth is attempted - making it take twice the duration (2x 500000 us).
+ regexp "usec_per_call=(\[0-9]{1,})\.*," $stats all usec_per_call
+ assert {$usec_per_call >= 1000000}
+ }
+
+ test {Module unload during blocking module auth} {
+ r config resetstat
+ r module load $miscmodule
+ set rd [redis_deferring_client]
+ r acl setuser foo >pwd on ~* &* +@all
+
+ # Start a blocking module auth attempt.
+ $rd AUTH foo block_allow
+ wait_for_blocked_clients_count 1
+
+ # moduleone and moduletwo have module auth cbs registered. Because blocking module auth is
+ # ongoing, they cannot be unloaded.
+ catch {r module unload testacl} e
+ assert_match {*the module has blocked clients*} $e
+ # The moduleauthtwo module can be unloaded because no client is blocked on it.
+ assert_equal "OK" [r module unload moduleauthtwo]
+
+ # The misc module does not have module auth cbs registered, so it can be unloaded even when
+ # blocking module auth is ongoing.
+ assert_equal "OK" [r module unload misc]
+
+ # Validate that blocking module auth succeeds.
+ wait_for_blocked_clients_count 0 500 10
+ $rd flush
+ assert_equal [$rd read] "OK"
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdstat auth]
+
+ # Validate that unloading the moduleauthtwo module does not unregister the module auth cbs
+ # of the testacl module. Module based auth should succeed.
+ assert_equal {OK} [r AUTH foo allow]
+
+ # Validate that the testacl module can be unloaded since blocking module auth is done.
+ r module unload testacl
+
+ # Validate that since all module auth cbs are unregistered, module auth attempts fail.
+ assert_error {*WRONGPASS*} {r AUTH foo block_allow}
+ assert_error {*WRONGPASS*} {r AUTH foo allow_two}
+ assert_error {*WRONGPASS*} {r AUTH foo allow}
+ assert_match {*calls=5,*,rejected_calls=0,failed_calls=3} [cmdstat auth]
+ }
+}
diff --git a/tests/unit/moduleapi/moduleconfigs.tcl b/tests/unit/moduleapi/moduleconfigs.tcl
new file mode 100644
index 0000000..1709e9d
--- /dev/null
+++ b/tests/unit/moduleapi/moduleconfigs.tcl
@@ -0,0 +1,247 @@
+set testmodule [file normalize tests/modules/moduleconfigs.so]
+set testmoduletwo [file normalize tests/modules/moduleconfigstwo.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+ test {Config get commands work} {
+ # Make sure config get module config works
+ assert_not_equal [lsearch [lmap x [r module list] {dict get $x name}] moduleconfigs] -1
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {one two}"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ }
+
+ test {Config set commands work} {
+ # Make sure that config sets work during runtime
+ r config set moduleconfigs.mutable_bool no
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ r config set moduleconfigs.memory_numeric 1mb
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1048576"
+ r config set moduleconfigs.string wafflewednesdays
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string wafflewednesdays"
+ set not_embstr [string repeat A 50]
+ r config set moduleconfigs.string $not_embstr
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string $not_embstr"
+ r config set moduleconfigs.string \x73\x75\x70\x65\x72\x20\x00\x73\x65\x63\x72\x65\x74\x20\x70\x61\x73\x73\x77\x6f\x72\x64
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {super \0secret password}"
+ r config set moduleconfigs.enum two
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ r config set moduleconfigs.flags two
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags two"
+ r config set moduleconfigs.numeric -2
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -2"
+ }
+
+ test {Config set commands enum flags} {
+ r config set moduleconfigs.flags "none"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags none"
+
+ r config set moduleconfigs.flags "two four"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {two four}"
+
+ r config set moduleconfigs.flags "five"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags five"
+
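+ # Note: the test module seemingly defines "five" as an alias for the union of the
+ # "one" and "four" bits, so a set containing both is normalized back to the alias, e.g.:
+ #   r config set moduleconfigs.flags "one four" ;# CONFIG GET then reports "five"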
+ r config set moduleconfigs.flags "one four"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags five"
+
+ r config set moduleconfigs.flags "one two four"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {five two}"
+ }
+
+ test {Immutable flag works properly and rejected strings don't leak} {
+ # Configs flagged immutable should not allow sets
+ catch {[r config set moduleconfigs.immutable_bool yes]} e
+ assert_match {*can't set immutable config*} $e
+ catch {[r config set moduleconfigs.string rejectisfreed]} e
+ assert_match {*Cannot set string to 'rejectisfreed'*} $e
+ }
+
+ test {Numeric limits work properly} {
+ # Configs over/under the limit shouldn't be allowed, and memory configs should only take memory values
+ catch {[r config set moduleconfigs.memory_numeric 200gb]} e
+ assert_match {*argument must be between*} $e
+ catch {[r config set moduleconfigs.memory_numeric -5]} e
+ assert_match {*argument must be a memory value*} $e
+ catch {[r config set moduleconfigs.numeric -10]} e
+ assert_match {*argument must be between*} $e
+ }
+
+ test {Enums only able to be set to passed in values} {
+ # Module authors specify what values are valid for enums, check that only those values are ok on a set
+ catch {[r config set moduleconfigs.enum asdf]} e
+ assert_match {*must be one of the following*} $e
+ }
+
+ test {test blocking of config registration and load outside of OnLoad} {
+ assert_equal [r block.register.configs.outside.onload] OK
+ }
+
+ test {Unload removes module configs} {
+ r module unload moduleconfigs
+ assert_equal [r config get moduleconfigs.*] ""
+ r module load $testmodule
+ # These should have reverted to their module-specified values
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {one two}"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ r module unload moduleconfigs
+ }
+
+ test {test loadex functionality} {
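+ # MODULE LOADEX loads a module with per-load configs and args, i.e.:
+ #   MODULE LOADEX path [CONFIG name value [CONFIG name value ...]] [ARGS args [args ...]]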
+ r module loadex $testmodule CONFIG moduleconfigs.mutable_bool no CONFIG moduleconfigs.immutable_bool yes CONFIG moduleconfigs.memory_numeric 2mb CONFIG moduleconfigs.string tclortickle
+ assert_not_equal [lsearch [lmap x [r module list] {dict get $x name}] moduleconfigs] -1
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool yes"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 2097152"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string tclortickle"
+ # Configs that were not changed should still be their module specified value
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {one two}"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ }
+
+ test {apply function works} {
+ catch {[r config set moduleconfigs.mutable_bool yes]} e
+ assert_match {*Bool configs*} $e
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ catch {[r config set moduleconfigs.memory_numeric 1000 moduleconfigs.numeric 1000]} e
+ assert_match {*cannot equal*} $e
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 2097152"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ r module unload moduleconfigs
+ }
+
+ test {test double config argument to loadex} {
+ r module loadex $testmodule CONFIG moduleconfigs.mutable_bool yes CONFIG moduleconfigs.mutable_bool no
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ r module unload moduleconfigs
+ }
+
+ test {missing loadconfigs call} {
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.string "cool" ARGS noload]} e
+ assert_match {*ERR*} $e
+ }
+
+ test {test loadex rejects bad configs} {
+ # Bad config 200gb is over the limit
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.memory_numeric 200gb ARGS]} e
+ assert_match {*ERR*} $e
+ # We should completely remove all configs on a failed load
+ assert_equal [r config get moduleconfigs.*] ""
+ # No value for config, should error out
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.mutable_bool CONFIG moduleconfigs.enum two ARGS]} e
+ assert_match {*ERR*} $e
+ assert_equal [r config get moduleconfigs.*] ""
+ # ASan will catch this if the string is not freed
+ catch {[r module loadex $testmodule CONFIG moduleconfigs.string rejectisfreed]} e
+ assert_match {*ERR*} $e
+ assert_equal [r config get moduleconfigs.*] ""
+ # test we can't set random configs
+ catch {[r module loadex $testmodule CONFIG maxclients 333]} e
+ assert_match {*ERR*} $e
+ assert_equal [r config get moduleconfigs.*] ""
+ assert_not_equal [r config get maxclients] "maxclients 333"
+ # test we can't set other module's configs
+ r module load $testmoduletwo
+ catch {[r module loadex $testmodule CONFIG configs.test no]} e
+ assert_match {*ERR*} $e
+ assert_equal [r config get configs.test] "configs.test yes"
+ r module unload configs
+ }
+
+ test {test config rewrite with dynamic load} {
+ # Translates to: super \0secret password
+ r module loadex $testmodule CONFIG moduleconfigs.string \x73\x75\x70\x65\x72\x20\x00\x73\x65\x63\x72\x65\x74\x20\x70\x61\x73\x73\x77\x6f\x72\x64 ARGS
+ assert_not_equal [lsearch [lmap x [r module list] {dict get $x name}] moduleconfigs] -1
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {super \0secret password}"
+ r config set moduleconfigs.mutable_bool yes
+ r config set moduleconfigs.memory_numeric 750
+ r config set moduleconfigs.enum two
+ r config set moduleconfigs.flags "four two"
+ r config rewrite
+ restart_server 0 true false
+ # Ensure configs we rewrote are present and that the conf file is readable
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 750"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {super \0secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {two four}"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ r module unload moduleconfigs
+ }
+
+ test {test multiple modules with configs} {
+ r module load $testmodule
+ r module loadex $testmoduletwo CONFIG configs.test yes
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string {secret password}"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum one"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ assert_equal [r config get configs.test] "configs.test yes"
+ r config set moduleconfigs.mutable_bool no
+ r config set moduleconfigs.string nice
+ r config set moduleconfigs.enum two
+ r config set configs.test no
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string nice"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get configs.test] "configs.test no"
+ r config rewrite
+ # test we can load from conf file with multiple different modules.
+ restart_server 0 true false
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool no"
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string nice"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get configs.test] "configs.test no"
+ r module unload moduleconfigs
+ r module unload configs
+ }
+
+ test {test 1.module load 2.config rewrite 3.module unload 4.config rewrite works} {
+ # Configs need to be removed from the old config file in this case.
+ r module loadex $testmodule CONFIG moduleconfigs.memory_numeric 500 ARGS
+ assert_not_equal [lsearch [lmap x [r module list] {dict get $x name}] moduleconfigs] -1
+ r config rewrite
+ r module unload moduleconfigs
+ r config rewrite
+ restart_server 0 true false
+ # Ensure configs we rewrote are no longer present
+ assert_equal [r config get moduleconfigs.*] ""
+ }
+ test {startup moduleconfigs} {
+ # No loadmodule directive
+ catch {exec src/redis-server --moduleconfigs.string "hello"} err
+ assert_match {*Module Configuration detected without loadmodule directive or no ApplyConfig call: aborting*} $err
+
+ # Bad config value
+ catch {exec src/redis-server --loadmodule "$testmodule" --moduleconfigs.string "rejectisfreed"} err
+ assert_match {*Issue during loading of configuration moduleconfigs.string : Cannot set string to 'rejectisfreed'*} $err
+
+ # missing LoadConfigs call
+ catch {exec src/redis-server --loadmodule "$testmodule" noload --moduleconfigs.string "hello"} err
+ assert_match {*Module Configurations were not set, likely a missing LoadConfigs call. Unloading the module.*} $err
+
+ # successful
+ start_server [list overrides [list loadmodule "$testmodule" moduleconfigs.string "bootedup" moduleconfigs.enum two moduleconfigs.flags "two four"]] {
+ assert_equal [r config get moduleconfigs.string] "moduleconfigs.string bootedup"
+ assert_equal [r config get moduleconfigs.mutable_bool] "moduleconfigs.mutable_bool yes"
+ assert_equal [r config get moduleconfigs.immutable_bool] "moduleconfigs.immutable_bool no"
+ assert_equal [r config get moduleconfigs.enum] "moduleconfigs.enum two"
+ assert_equal [r config get moduleconfigs.flags] "moduleconfigs.flags {two four}"
+ assert_equal [r config get moduleconfigs.numeric] "moduleconfigs.numeric -1"
+ assert_equal [r config get moduleconfigs.memory_numeric] "moduleconfigs.memory_numeric 1024"
+ }
+ }
+}
+
diff --git a/tests/unit/moduleapi/postnotifications.tcl b/tests/unit/moduleapi/postnotifications.tcl
new file mode 100644
index 0000000..7e48c7b
--- /dev/null
+++ b/tests/unit/moduleapi/postnotifications.tcl
@@ -0,0 +1,219 @@
+set testmodule [file normalize tests/modules/postnotifications.so]
+
+tags "modules" {
+ start_server {} {
+ r module load $testmodule with_key_events
+
+ test {Test write on post notification callback} {
+ set repl [attach_to_replication_stream]
+
+ r set string_x 1
+ assert_equal {1} [r get string_changed{string_x}]
+ assert_equal {1} [r get string_total]
+
+ r set string_x 2
+ assert_equal {2} [r get string_changed{string_x}]
+ assert_equal {2} [r get string_total]
+
+ # the {lpush before_overwritten string_x} is a post notification job registered when 'string_x' was overwritten
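+ # (A post notification job is presumably registered from within the keyspace notification
+ # callback and executed right after the command that triggered it, inside the same
+ # MULTI/EXEC block, which is why it appears between {set string_x 2} and {exec} below.)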
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {set string_x 1}
+ {incr string_changed{string_x}}
+ {incr string_total}
+ {exec}
+ {multi}
+ {set string_x 2}
+ {lpush before_overwritten string_x}
+ {incr string_changed{string_x}}
+ {incr string_total}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {Test write on post notification callback from module thread} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ assert_equal {OK} [r postnotification.async_set]
+ assert_equal {1} [r get string_changed{string_x}]
+ assert_equal {1} [r get string_total]
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {set string_x 1}
+ {incr string_changed{string_x}}
+ {incr string_total}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {Test active expire} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ r set x 1
+ r pexpire x 10
+
+ wait_for_condition 100 50 {
+ [r keys expired] == {expired}
+ } else {
+ puts [r keys *]
+ fail "Failed waiting for x to expired"
+ }
+
+ # the {lpush before_expired x} is a post notification job registered before 'x' got expired
+ assert_replication_stream $repl {
+ {select *}
+ {set x 1}
+ {pexpireat x *}
+ {multi}
+ {del x}
+ {lpush before_expired x}
+ {incr expired}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {Test lazy expire} {
+ r flushall
+ r DEBUG SET-ACTIVE-EXPIRE 0
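+ # With active expiry disabled, the expired key is only reclaimed lazily, i.e. on
+ # the next access (the GET below), which is when the post notification job fires.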
+ set repl [attach_to_replication_stream]
+
+ r set x 1
+ r pexpire x 1
+ after 10
+ assert_equal {} [r get x]
+
+ # the {lpush before_expired x} is a post notification job registered before 'x' got expired
+ assert_replication_stream $repl {
+ {select *}
+ {set x 1}
+ {pexpireat x *}
+ {multi}
+ {del x}
+ {lpush before_expired x}
+ {incr expired}
+ {exec}
+ }
+ close_replication_stream $repl
+ r DEBUG SET-ACTIVE-EXPIRE 1
+ } {OK} {needs:debug}
+
+ test {Test lazy expire inside post job notification} {
+ r flushall
+ r DEBUG SET-ACTIVE-EXPIRE 0
+ set repl [attach_to_replication_stream]
+
+ r set x 1
+ r pexpire x 1
+ after 10
+ assert_equal {OK} [r set read_x 1]
+
+ # the {lpush before_expired x} is a post notification job registered before 'x' got expired
+ assert_replication_stream $repl {
+ {select *}
+ {set x 1}
+ {pexpireat x *}
+ {multi}
+ {set read_x 1}
+ {del x}
+ {lpush before_expired x}
+ {incr expired}
+ {exec}
+ }
+ close_replication_stream $repl
+ r DEBUG SET-ACTIVE-EXPIRE 1
+ } {OK} {needs:debug}
+
+ test {Test nested keyspace notification} {
+ r flushall
+ set repl [attach_to_replication_stream]
+
+ assert_equal {OK} [r set write_sync_write_sync_x 1]
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {set x 1}
+ {set write_sync_x 1}
+ {set write_sync_write_sync_x 1}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {Test eviction} {
+ r flushall
+ set repl [attach_to_replication_stream]
+ r set x 1
+ r config set maxmemory-policy allkeys-random
+ r config set maxmemory 1
+
+ assert_error {OOM *} {r set y 1}
+
+ # the {lpush before_evicted x} is a post notification job registered before 'x' got evicted
+ assert_replication_stream $repl {
+ {select *}
+ {set x 1}
+ {multi}
+ {del x}
+ {lpush before_evicted x}
+ {incr evicted}
+ {exec}
+ }
+ close_replication_stream $repl
+ } {} {needs:config-maxmemory}
+ }
+}
+
+set testmodule2 [file normalize tests/modules/keyspace_events.so]
+
+tags "modules" {
+ start_server {} {
+ r module load $testmodule with_key_events
+ r module load $testmodule2
+ test {Test write on post notification callback} {
+ set repl [attach_to_replication_stream]
+
+ r set string_x 1
+ assert_equal {1} [r get string_changed{string_x}]
+ assert_equal {1} [r get string_total]
+
+ r set string_x 2
+ assert_equal {2} [r get string_changed{string_x}]
+ assert_equal {2} [r get string_total]
+
+ r set string1_x 1
+ assert_equal {1} [r get string_changed{string1_x}]
+ assert_equal {3} [r get string_total]
+
+ # the {lpush before_overwritten string_x} is a post notification job registered before 'string_x' got overwritten
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {set string_x 1}
+ {incr string_changed{string_x}}
+ {incr string_total}
+ {exec}
+ {multi}
+ {set string_x 2}
+ {lpush before_overwritten string_x}
+ {incr string_changed{string_x}}
+ {incr string_total}
+ {exec}
+ {multi}
+ {set string1_x 1}
+ {incr string_changed{string1_x}}
+ {incr string_total}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/propagate.tcl b/tests/unit/moduleapi/propagate.tcl
new file mode 100644
index 0000000..90a369d
--- /dev/null
+++ b/tests/unit/moduleapi/propagate.tcl
@@ -0,0 +1,763 @@
+set testmodule [file normalize tests/modules/propagate.so]
+set miscmodule [file normalize tests/modules/misc.so]
+set keyspace_events [file normalize tests/modules/keyspace_events.so]
+
+tags "modules" {
+ test {Modules can propagate in async and threaded contexts} {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ $replica module load $keyspace_events
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+ $master module load $keyspace_events
+
+ # Start the replication process...
+ $replica replicaof $master_host $master_port
+ wait_for_sync $replica
+ after 1000
+
+ test {module propagates from timer} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.timer
+
+ wait_for_condition 500 10 {
+ [$replica get timer] eq "3"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {select *}
+ {incr timer}
+ {incr timer}
+ {incr timer}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagation with notifications} {
+ set repl [attach_to_replication_stream]
+
+ $master set x y
+
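+ # One of the loaded modules presumably registers a generic keyspace notification
+ # handler that does an RM_Call INCR on "notifications" for every event, so the SET
+ # and the module's INCR are propagated together inside a single MULTI/EXEC: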
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {set x y}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagation with notifications with multi} {
+ set repl [attach_to_replication_stream]
+
+ $master multi
+ $master set x1 y1
+ $master set x2 y2
+ $master exec
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {set x1 y1}
+ {incr notifications}
+ {set x2 y2}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagation with notifications with active-expire} {
+ $master debug set-active-expire 1
+ set repl [attach_to_replication_stream]
+
+ $master set asdf1 1 PX 300
+ $master set asdf2 2 PX 300
+ $master set asdf3 3 PX 300
+
+ wait_for_condition 500 10 {
+ [$replica keys asdf*] eq {}
+ } else {
+ fail "Not all keys have expired"
+ }
+
+ # Note: wherever there's a double notification, it's because SET with PX issues two
+ # separate notifications, one for "set" and one for "expire"
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {incr notifications}
+ {set asdf1 1 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {set asdf2 2 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {set asdf3 3 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {incr testkeyspace:expired}
+ {del asdf*}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {incr testkeyspace:expired}
+ {del asdf*}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {incr testkeyspace:expired}
+ {del asdf*}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ $master debug set-active-expire 0
+ }
+
+ test {module propagation with notifications with eviction case 1} {
+ $master flushall
+ $master set asdf1 1
+ $master set asdf2 2
+ $master set asdf3 3
+
+ $master config set maxmemory-policy allkeys-random
+ $master config set maxmemory 1
+
+ # Please note the following loop:
+ # We evict a key and send a notification, which does INCR on the "notifications" key, so
+ # every time we evict any key, the "notifications" key exists again (this happens inside
+ # the performEvictions loop). So even evicting "notifications" causes an INCR on "notifications".
+ # If maxmemory-eviction-tenacity were set to 100 this would be an endless loop, but
+ # since the default is 10, at some point the performEvictions loop ends.
+ # Bottom line: "notifications" always exists and we can't really determine the order of evictions.
+ # This test is here only for sanity.
+
+ # The replica will get the notification with multi exec and we have a generic notification handler
+ # that performs `RedisModule_Call(ctx, "INCR", "c", "multi");` if the notification is inside multi exec.
+ # so we will have 2 keys, "notifications" and "multi".
+ wait_for_condition 500 10 {
+ [$replica dbsize] eq 2
+ } else {
+ fail "Not all keys have been evicted"
+ }
+
+ $master config set maxmemory 0
+ $master config set maxmemory-policy noeviction
+ }
+
+ test {module propagation with notifications with eviction case 2} {
+ $master flushall
+ set repl [attach_to_replication_stream]
+
+ $master set asdf1 1 EX 300
+ $master set asdf2 2 EX 300
+ $master set asdf3 3 EX 300
+
+ # Please note we use volatile eviction to prevent the loop described in the test above.
+ # "notifications" is not volatile so it always remains
+ $master config resetstat
+ $master config set maxmemory-policy volatile-ttl
+ $master config set maxmemory 1
+
+ wait_for_condition 500 10 {
+ [s evicted_keys] eq 3
+ } else {
+ fail "Not all keys have been evicted"
+ }
+
+ $master config set maxmemory 0
+ $master config set maxmemory-policy noeviction
+
+ $master set asdf4 4
+
+ # Note: wherever there's a double notification, it's because SET with EX issues two
+ # separate notifications, one for "set" and one for "expire"
+ # Note that although CONFIG SET maxmemory is called in this flow (see issue #10014),
+ # eviction will happen and will not induce propagation of the CONFIG command (see #10019).
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {incr notifications}
+ {set asdf1 1 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {set asdf2 2 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {set asdf3 3 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {del asdf*}
+ {exec}
+ {multi}
+ {incr notifications}
+ {del asdf*}
+ {exec}
+ {multi}
+ {incr notifications}
+ {del asdf*}
+ {exec}
+ {multi}
+ {incr notifications}
+ {set asdf4 4}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagation with timer and CONFIG SET maxmemory} {
+ set repl [attach_to_replication_stream]
+
+ $master config resetstat
+ $master config set maxmemory-policy volatile-random
+
+ $master propagate-test.timer-maxmemory
+
+ # Wait until the volatile keys are evicted
+ wait_for_condition 500 10 {
+ [s evicted_keys] eq 2
+ } else {
+ fail "Not all keys have been evicted"
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {incr notifications}
+ {set timer-maxmemory-volatile-start 1 PXAT *}
+ {incr timer-maxmemory-middle}
+ {incr notifications}
+ {incr notifications}
+ {set timer-maxmemory-volatile-end 1 PXAT *}
+ {exec}
+ {multi}
+ {incr notifications}
+ {del timer-maxmemory-volatile-*}
+ {exec}
+ {multi}
+ {incr notifications}
+ {del timer-maxmemory-volatile-*}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ $master config set maxmemory 0
+ $master config set maxmemory-policy noeviction
+ }
+
+ test {module propagation with timer and EVAL} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.timer-eval
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {incrby timer-eval-start 1}
+ {incr notifications}
+ {set foo bar}
+ {incr timer-eval-middle}
+ {incr notifications}
+ {incrby timer-eval-end 1}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates nested ctx case1} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.timer-nested
+
+ wait_for_condition 500 10 {
+ [$replica get timer-nested-end] eq "1"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incrby timer-nested-start 1}
+ {incrby timer-nested-end 1}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ # Note propagate-test.timer-nested just propagates INCRBY, causing an
+ # inconsistency, so we flush
+ $master flushall
+ }
+
+ test {module propagates nested ctx case2} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.timer-nested-repl
+
+ wait_for_condition 500 10 {
+ [$replica get timer-nested-end] eq "1"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incrby timer-nested-start 1}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr counter-3}
+ {incr counter-4}
+ {incr notifications}
+ {incr after-call}
+ {incr notifications}
+ {incr before-call-2}
+ {incr notifications}
+ {incr asdf}
+ {incr notifications}
+ {del asdf}
+ {incr notifications}
+ {incr after-call-2}
+ {incr notifications}
+ {incr timer-nested-middle}
+ {incrby timer-nested-end 1}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ # Note propagate-test.timer-nested-repl just propagates INCRBY, causing an
+ # inconsistency, so we flush
+ $master flushall
+ }
+
+ test {module propagates from thread} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.thread
+
+ wait_for_condition 500 10 {
+ [$replica get a-from-thread] eq "3"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr a-from-thread}
+ {incr notifications}
+ {incr thread-call}
+ {incr b-from-thread}
+ {exec}
+ {multi}
+ {incr a-from-thread}
+ {incr notifications}
+ {incr thread-call}
+ {incr b-from-thread}
+ {exec}
+ {multi}
+ {incr a-from-thread}
+ {incr notifications}
+ {incr thread-call}
+ {incr b-from-thread}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from thread with detached ctx} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.detached-thread
+
+ wait_for_condition 500 10 {
+ [$replica get thread-detached-after] eq "1"
+ } else {
+ fail "The key doesn't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr thread-detached-before}
+ {incr notifications}
+ {incr thread-detached-1}
+ {incr notifications}
+ {incr thread-detached-2}
+ {incr thread-detached-after}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from command} {
+ set repl [attach_to_replication_stream]
+
+ $master propagate-test.simple
+ $master propagate-test.mixed
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {incr after-call}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from EVAL} {
+ set repl [attach_to_replication_stream]
+
+ assert_equal [ $master eval { \
+ redis.call("propagate-test.simple"); \
+ redis.call("set", "x", "y"); \
+ redis.call("propagate-test.mixed"); return "OK" } 0 ] {OK}
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {set x y}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {incr after-call}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from command after good EVAL} {
+ set repl [attach_to_replication_stream]
+
+ assert_equal [ $master eval { return "hello" } 0 ] {hello}
+ $master propagate-test.simple
+ $master propagate-test.mixed
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {incr after-call}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from command after bad EVAL} {
+ set repl [attach_to_replication_stream]
+
+ catch { $master eval { return "hello" } -12 } e
+ assert_equal $e {ERR Number of keys can't be negative}
+ $master propagate-test.simple
+ $master propagate-test.mixed
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr counter-1}
+ {incr counter-2}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {incr after-call}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module propagates from multi-exec} {
+ set repl [attach_to_replication_stream]
+
+ $master multi
+ $master propagate-test.simple
+ $master propagate-test.mixed
+ $master propagate-test.timer-nested-repl
+ $master exec
+
+ wait_for_condition 500 10 {
+ [$replica get timer-nested-end] eq "1"
+ } else {
+ fail "The two counters don't match the expected value."
+ }
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr notifications}
+ {incr after-call}
+ {exec}
+ {multi}
+ {incrby timer-nested-start 1}
+ {incr notifications}
+ {incr using-call}
+ {incr counter-1}
+ {incr counter-2}
+ {incr counter-3}
+ {incr counter-4}
+ {incr notifications}
+ {incr after-call}
+ {incr notifications}
+ {incr before-call-2}
+ {incr notifications}
+ {incr asdf}
+ {incr notifications}
+ {del asdf}
+ {incr notifications}
+ {incr after-call-2}
+ {incr notifications}
+ {incr timer-nested-middle}
+ {incrby timer-nested-end 1}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ # Note propagate-test.timer-nested just propagates INCRBY, causing an
+ # inconsistency, so we flush
+ $master flushall
+ }
+
+ test {module RM_Call of expired key propagation} {
+ $master debug set-active-expire 0
+
+ $master set k1 900 px 100
+ after 110
+
+ set repl [attach_to_replication_stream]
+ $master propagate-test.incr k1
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {del k1}
+ {propagate-test.incr k1}
+ {exec}
+ }
+ close_replication_stream $repl
+
+ assert_equal [$master get k1] 1
+ assert_equal [$master ttl k1] -1
+ assert_equal [$replica get k1] 1
+ assert_equal [$replica ttl k1] -1
+ }
+
+ test {module notification on set} {
+ set repl [attach_to_replication_stream]
+
+ $master SADD s foo
+
+ wait_for_condition 500 10 {
+ [$replica SCARD s] eq "1"
+ } else {
+ fail "Failed to wait for set to be replicated"
+ }
+
+ $master SPOP s 1
+
+ wait_for_condition 500 10 {
+ [$replica SCARD s] eq "0"
+ } else {
+ fail "Failed to wait for set to be replicated"
+ }
+
+ # Currently the `del` command comes after the notification.
+ # When we fix spop to fire notification at the end (like all other commands),
+ # the `del` will come first.
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {incr notifications}
+ {sadd s foo}
+ {exec}
+ {multi}
+ {incr notifications}
+ {incr notifications}
+ {del s}
+ {exec}
+ }
+ close_replication_stream $repl
+ }
+
+ test {module key miss notification do not cause read command to be replicated} {
+ set repl [attach_to_replication_stream]
+
+ $master flushall
+
+ $master get unexisting_key
+
+ wait_for_condition 500 10 {
+ [$replica get missed] eq "1"
+ } else {
+ fail "Failed to wait for set to be replicated"
+ }
+
+ # This test checks a wrong (!) behavior that causes a read command to be replicated
+ # to the replica/AOF. We keep the test to verify that such a wrong behavior does not
+ # cause any crashes.
+ assert_replication_stream $repl {
+ {select *}
+ {flushall}
+ {multi}
+ {incr notifications}
+ {incr missed}
+ {get unexisting_key}
+ {exec}
+ }
+
+ close_replication_stream $repl
+ }
+
+ test "Unload the module - propagate-test/testkeyspace" {
+ assert_equal {OK} [r module unload propagate-test]
+ assert_equal {OK} [r module unload testkeyspace]
+ }
+
+ assert_equal [s -1 unexpected_error_replies] 0
+ }
+ }
+ }
+}
+
+
+tags "modules aof" {
+ foreach aofload_type {debug_cmd startup} {
+ test "Modules RM_Replicate replicates MULTI/EXEC correctly: AOF-load type $aofload_type" {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ # Enable the AOF
+ r config set appendonly yes
+ r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite.
+ waitForBgrewriteaof r
+
+ r propagate-test.simple
+ r propagate-test.mixed
+ r multi
+ r propagate-test.simple
+ r propagate-test.mixed
+ r exec
+
+ assert_equal [r get counter-1] {}
+ assert_equal [r get counter-2] {}
+ assert_equal [r get using-call] 2
+ assert_equal [r get after-call] 2
+ assert_equal [r get notifications] 4
+
+ # Load the AOF
+ if {$aofload_type == "debug_cmd"} {
+ r debug loadaof
+ } else {
+ r config rewrite
+ restart_server 0 true false
+ wait_done_loading r
+ }
+
+ # This module misbehaves on purpose: it only calls
+ # RM_Replicate for counter-1 and counter-2, so the values
+ # after AOF-load are different
+ assert_equal [r get counter-1] 4
+ assert_equal [r get counter-2] 4
+ assert_equal [r get using-call] 2
+ assert_equal [r get after-call] 2
+ # 4+4+2+2 commands from AOF (just above) + 4 "INCR notifications" from AOF + 4 notifications for these INCRs
+ assert_equal [r get notifications] 20
+
+ assert_equal {OK} [r module unload propagate-test]
+ assert_equal [s 0 unexpected_error_replies] 0
+ }
+ }
+ test "Modules RM_Call does not update stats during aof load: AOF-load type $aofload_type" {
+ start_server [list overrides [list loadmodule "$miscmodule"]] {
+ # Enable the AOF
+ r config set appendonly yes
+ r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite.
+ waitForBgrewriteaof r
+
+ r config resetstat
+ r set foo bar
+ r EVAL {return redis.call('SET', KEYS[1], ARGV[1])} 1 foo bar2
+ r test.rm_call_replicate set foo bar3
+ r EVAL {return redis.call('test.rm_call_replicate',ARGV[1],KEYS[1],ARGV[2])} 1 foo set bar4
+
+ r multi
+ r set foo bar5
+ r EVAL {return redis.call('SET', KEYS[1], ARGV[1])} 1 foo bar6
+ r test.rm_call_replicate set foo bar7
+ r EVAL {return redis.call('test.rm_call_replicate',ARGV[1],KEYS[1],ARGV[2])} 1 foo set bar8
+ r exec
+
+ assert_match {*calls=8,*,rejected_calls=0,failed_calls=0} [cmdrstat set r]
+
+
+ # Load the AOF
+ if {$aofload_type == "debug_cmd"} {
+ r config resetstat
+ r debug loadaof
+ } else {
+ r config rewrite
+ restart_server 0 true false
+ wait_done_loading r
+ }
+
+ assert_no_match {*calls=*} [cmdrstat set r]
+
+ }
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/publish.tcl b/tests/unit/moduleapi/publish.tcl
new file mode 100644
index 0000000..a6304ea
--- /dev/null
+++ b/tests/unit/moduleapi/publish.tcl
@@ -0,0 +1,34 @@
+set testmodule [file normalize tests/modules/publish.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {PUBLISH and SPUBLISH via a module} {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ assert_equal {1} [subscribe $rd2 {chan1}]
+ assert_equal 1 [r publish.shard chan1 hello]
+ assert_equal 1 [r publish.classic chan1 world]
+ assert_equal {smessage chan1 hello} [$rd1 read]
+ assert_equal {message chan1 world} [$rd2 read]
+ $rd1 close
+ $rd2 close
+ }
+
+ test {module publish to self with multi message} {
+ r hello 3
+ r subscribe foo
+
+ # The published message comes after the response of the command that issued it.
+ assert_equal [r publish.classic_multi foo bar vaz] {1 1}
+ assert_equal [r read] {message foo bar}
+ assert_equal [r read] {message foo vaz}
+
+ r unsubscribe foo
+ r hello 2
+ set _ ""
+ } {} {resp3}
+
+}
diff --git a/tests/unit/moduleapi/rdbloadsave.tcl b/tests/unit/moduleapi/rdbloadsave.tcl
new file mode 100644
index 0000000..9319c93
--- /dev/null
+++ b/tests/unit/moduleapi/rdbloadsave.tcl
@@ -0,0 +1,200 @@
+set testmodule [file normalize tests/modules/rdbloadsave.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test "Module rdbloadsave sanity" {
+ r test.sanity
+
+ # Try to load non-existing file
+ assert_error {*No such file or directory*} {r test.rdbload sanity.rdb}
+
+ r set x 1
+ assert_equal OK [r test.rdbsave sanity.rdb]
+
+ r flushdb
+ assert_equal OK [r test.rdbload sanity.rdb]
+ assert_equal 1 [r get x]
+ }
+
+ test "Module rdbloadsave test with pipelining" {
+ r config set save ""
+ r config set loading-process-events-interval-bytes 1024
+ r config set key-load-delay 50
+ r flushdb
+
+ populate 3000 a 1024
+ r set x 111
+ assert_equal [r dbsize] 3001
+
+ assert_equal OK [r test.rdbsave blabla.rdb]
+ r flushdb
+ assert_equal [r dbsize] 0
+
+ # Send commands in a pipeline. The first command will call RM_RdbLoad() in
+ # the command callback. While loading the RDB, Redis can enter the networking
+ # code to reply -LOADING. By sending commands in a pipeline, we verify this
+ # doesn't cause a problem, e.g. that Redis won't try to process the next
+ # message of the current client while it is still in the command callback
+ # for that client.
+ set rd1 [redis_deferring_client]
+ $rd1 test.rdbload blabla.rdb
+
+ wait_for_condition 50 100 {
+ [s loading] eq 1
+ } else {
+ fail "Redis did not start loading or loaded RDB too fast"
+ }
+
+ $rd1 get x
+ $rd1 dbsize
+
+ assert_equal OK [$rd1 read]
+ assert_equal 111 [$rd1 read]
+ assert_equal 3001 [$rd1 read]
+ r flushdb
+ r config set key-load-delay 0
+ }
+
+ test "Module rdbloadsave with aof" {
+ r config set save ""
+
+ # Enable the AOF
+ r config set appendonly yes
+ r config set auto-aof-rewrite-percentage 0 ; # Disable auto-rewrite.
+ waitForBgrewriteaof r
+
+ r set k v1
+ assert_equal OK [r test.rdbsave aoftest.rdb]
+
+ r set k v2
+ r config set rdb-key-save-delay 10000000
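+ # rdb-key-save-delay makes the save child sleep that many microseconds per key,
+ # keeping the AOF-rewrite child alive long enough for RM_RdbLoad() to kill it.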
+ r bgrewriteaof
+
+ # RM_RdbLoad() should kill aof fork
+ assert_equal OK [r test.rdbload aoftest.rdb]
+
+ wait_for_condition 50 100 {
+ [string match {*Killing*AOF*child*} [exec tail -20 < [srv 0 stdout]]]
+ } else {
+ fail "Can't find 'Killing AOF child' in recent log lines"
+ }
+
+ # Verify the value in the loaded rdb
+ assert_equal v1 [r get k]
+
+ r flushdb
+ r config set rdb-key-save-delay 0
+ r config set appendonly no
+ }
+
+ test "Module rdbloadsave with bgsave" {
+ r flushdb
+ r config set save ""
+
+ r set k v1
+ assert_equal OK [r test.rdbsave bgsave.rdb]
+
+ r set k v2
+ r config set rdb-key-save-delay 500000
+ r bgsave
+
+ # RM_RdbLoad() should kill RDB fork
+ assert_equal OK [r test.rdbload bgsave.rdb]
+
+ wait_for_condition 10 1000 {
+ [string match {*Background*saving*terminated*} [exec tail -20 < [srv 0 stdout]]]
+ } else {
+ fail "Can't find 'Background saving terminated' in recent log lines"
+ }
+
+ assert_equal v1 [r get k]
+ r flushall
+ waitForBgsave r
+ r config set rdb-key-save-delay 0
+ }
+
+ test "Module rdbloadsave calls rdbsave in a module fork" {
+ r flushdb
+ r config set save ""
+ r config set rdb-key-save-delay 500000
+
+ r set k v1
+
+ # Module will call RM_Fork() before calling RM_RdbSave()
+ assert_equal OK [r test.rdbsave_fork rdbfork.rdb]
+ assert_equal [s module_fork_in_progress] 1
+
+ wait_for_condition 10 1000 {
+ [status r module_fork_in_progress] == "0"
+ } else {
+ fail "Module fork didn't finish"
+ }
+
+ r set k v2
+ assert_equal OK [r test.rdbload rdbfork.rdb]
+ assert_equal v1 [r get k]
+
+ r config set rdb-key-save-delay 0
+ }
+
+ test "Unload the module - rdbloadsave" {
+ assert_equal {OK} [r module unload rdbloadsave]
+ }
+
+ tags {repl} {
+ test {Module rdbloadsave on master and replica} {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ $master set x 10000
+
+ # Start the replication process...
+ $replica replicaof $master_host $master_port
+
+ wait_for_condition 100 100 {
+ [status $master sync_full] == 1
+ } else {
+ fail "Master <-> Replica didn't start the full sync"
+ }
+
+ # RM_RdbSave() is allowed on replicas
+ assert_equal OK [$replica test.rdbsave rep.rdb]
+
+ # RM_RdbLoad() is not allowed on replicas
+ assert_error {*supported*} {$replica test.rdbload rep.rdb}
+
+ assert_equal OK [$master test.rdbsave master.rdb]
+ $master set x 20000
+
+ wait_for_condition 100 100 {
+ [$replica get x] == 20000
+ } else {
+ fail "Replica didn't get the update"
+ }
+
+ # Loading RDB on master will drop replicas
+ assert_equal OK [$master test.rdbload master.rdb]
+
+ wait_for_condition 100 100 {
+ [status $master sync_full] == 2
+ } else {
+ fail "Master <-> Replica didn't start the full sync"
+ }
+
+ wait_for_condition 100 100 {
+ [$replica get x] == 10000
+ } else {
+ fail "Replica didn't get the update"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/reply.tcl b/tests/unit/moduleapi/reply.tcl
new file mode 100644
index 0000000..3cf284d
--- /dev/null
+++ b/tests/unit/moduleapi/reply.tcl
@@ -0,0 +1,152 @@
+set testmodule [file normalize tests/modules/reply.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ # test all with hello 2/3
+ for {set proto 2} {$proto <= 3} {incr proto} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$proto == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$proto == 2} {continue}
+ }
+ r hello $proto
+
+ test "RESP$proto: RM_ReplyWithString: an string reply" {
+ # RedisString
+ set string [r rw.string "Redis"]
+ assert_equal "Redis" $string
+ # C string
+ set string [r rw.cstring]
+ assert_equal "A simple string" $string
+ }
+
+ test "RESP$proto: RM_ReplyWithBigNumber: an string reply" {
+ assert_equal "123456778901234567890" [r rw.bignumber "123456778901234567890"]
+ }
+
+ test "RESP$proto: RM_ReplyWithInt: an integer reply" {
+ assert_equal 42 [r rw.int 42]
+ }
+
+ test "RESP$proto: RM_ReplyWithDouble: a float reply" {
+ assert_equal 3.141 [r rw.double 3.141]
+ }
+
+ test "RESP$proto: RM_ReplyWithDouble: inf" {
+ if {$proto == 2} {
+ assert_equal "inf" [r rw.double inf]
+ assert_equal "-inf" [r rw.double -inf]
+ } else {
+ # TCL converts inf to different results on different platforms, e.g. inf on mac
+ # and Inf on others, so use readraw to verify the protocol
+ r readraw 1
+ assert_equal ",inf" [r rw.double inf]
+ assert_equal ",-inf" [r rw.double -inf]
+ r readraw 0
+ }
+ }
+
+ test "RESP$proto: RM_ReplyWithDouble: NaN" {
+ if {$proto == 2} {
+ assert_equal "nan" [r rw.double 0 0]
+ assert_equal "nan" [r rw.double]
+ } else {
+ # TCL won't convert nan into a double, use readraw to verify the protocol
+ r readraw 1
+ assert_equal ",nan" [r rw.double 0 0]
+ assert_equal ",nan" [r rw.double]
+ r readraw 0
+ }
+ }
+
+ set ld 0.00000000000000001
+ test "RESP$proto: RM_ReplyWithLongDouble: a float reply" {
+ if {$proto == 2} {
+ # here the response gets to TCL as a string
+ assert_equal $ld [r rw.longdouble $ld]
+ } else {
+ # TCL doesn't support long double and the test infra converts it to a
+ # normal double which causes precision loss, so we use readraw instead
+ r readraw 1
+ assert_equal ",$ld" [r rw.longdouble $ld]
+ r readraw 0
+ }
+ }
+
+ test "RESP$proto: RM_ReplyWithVerbatimString: a string reply" {
+ assert_equal "bla\nbla\nbla" [r rw.verbatim "bla\nbla\nbla"]
+ }
+
+ test "RESP$proto: RM_ReplyWithArray: an array reply" {
+ assert_equal {0 1 2 3 4} [r rw.array 5]
+ }
+
+ test "RESP$proto: RM_ReplyWithMap: an map reply" {
+ set res [r rw.map 3]
+ if {$proto == 2} {
+ assert_equal {0 0 1 1.5 2 3} $res
+ } else {
+ assert_equal [dict create 0 0.0 1 1.5 2 3.0] $res
+ }
+ }
+
+ test "RESP$proto: RM_ReplyWithSet: an set reply" {
+ assert_equal {0 1 2} [r rw.set 3]
+ }
+
+ test "RESP$proto: RM_ReplyWithAttribute: an set reply" {
+ if {$proto == 2} {
+ catch {[r rw.attribute 3]} e
+ assert_match "Attributes aren't supported by RESP 2" $e
+ } else {
+ r readraw 1
+ set res [r rw.attribute 3]
+ assert_equal [r read] {:0}
+ assert_equal [r read] {,0}
+ assert_equal [r read] {:1}
+ assert_equal [r read] {,1.5}
+ assert_equal [r read] {:2}
+ assert_equal [r read] {,3}
+ assert_equal [r read] {+OK}
+ r readraw 0
+ }
+ }
+
+ test "RESP$proto: RM_ReplyWithBool: a boolean reply" {
+ assert_equal {0 1} [r rw.bool]
+ }
+
+ test "RESP$proto: RM_ReplyWithNull: a NULL reply" {
+ assert_equal {} [r rw.null]
+ }
+
+ test "RESP$proto: RM_ReplyWithError: an error reply" {
+ catch {r rw.error} e
+ assert_match "An error" $e
+ }
+
+ test "RESP$proto: RM_ReplyWithErrorFormat: error format reply" {
+ catch {r rw.error_format "An error: %s" foo} e
+ assert_match "An error: foo" $e ;# Should not be used by a user, but compatible with RM_ReplyError
+
+ catch {r rw.error_format "-ERR An error: %s" foo2} e
+ assert_match "-ERR An error: foo2" $e ;# Should not be used by a user, but compatible with RM_ReplyError (There are two hyphens, TCL removes the first one)
+
+ catch {r rw.error_format "-WRONGTYPE A type error: %s" foo3} e
+ assert_match "-WRONGTYPE A type error: foo3" $e ;# Should not be used by a user, but compatible with RM_ReplyError (There are two hyphens, TCL removes the first one)
+
+ catch {r rw.error_format "ERR An error: %s" foo4} e
+ assert_match "ERR An error: foo4" $e
+
+ catch {r rw.error_format "WRONGTYPE A type error: %s" foo5} e
+ assert_match "WRONGTYPE A type error: foo5" $e
+ }
+
+ r hello 2
+ }
+
+ test "Unload the module - replywith" {
+ assert_equal {OK} [r module unload replywith]
+ }
+}
diff --git a/tests/unit/moduleapi/scan.tcl b/tests/unit/moduleapi/scan.tcl
new file mode 100644
index 0000000..1efd6ac
--- /dev/null
+++ b/tests/unit/moduleapi/scan.tcl
@@ -0,0 +1,69 @@
+set testmodule [file normalize tests/modules/scan.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Module scan keyspace} {
+ # the module creates a scan command with filtering which also returns values
+ r set x 1
+ r set y 2
+ r set z 3
+ r hset h f v
+ lsort [r scan.scan_strings]
+ } {{x 1} {y 2} {z 3}}
+
+ test {Module scan hash listpack} {
+ r hmset hh f1 v1 f2 v2
+ assert_encoding listpack hh
+ lsort [r scan.scan_key hh]
+ } {{f1 v1} {f2 v2}}
+
+ test {Module scan hash listpack with int value} {
+ r hmset hh1 f1 1
+ assert_encoding listpack hh1
+ lsort [r scan.scan_key hh1]
+ } {{f1 1}}
+
+ test {Module scan hash dict} {
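+ # Lowering hash-max-ziplist-entries below the resulting field count makes the
+ # next write convert the hash from a listpack to a real hash table.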
+ r config set hash-max-ziplist-entries 2
+ r hmset hh f3 v3
+ assert_encoding hashtable hh
+ lsort [r scan.scan_key hh]
+ } {{f1 v1} {f2 v2} {f3 v3}}
+
+ test {Module scan zset listpack} {
+ r zadd zz 1 f1 2 f2
+ assert_encoding listpack zz
+ lsort [r scan.scan_key zz]
+ } {{f1 1} {f2 2}}
+
+ test {Module scan zset skiplist} {
+ r config set zset-max-ziplist-entries 2
+ r zadd zz 3 f3
+ assert_encoding skiplist zz
+ lsort [r scan.scan_key zz]
+ } {{f1 1} {f2 2} {f3 3}}
+
+ test {Module scan set intset} {
+ r sadd ss 1 2
+ assert_encoding intset ss
+ lsort [r scan.scan_key ss]
+ } {{1 {}} {2 {}}}
+
+ test {Module scan set dict} {
+ r config set set-max-intset-entries 2
+ r sadd ss 3
+ assert_encoding hashtable ss
+ lsort [r scan.scan_key ss]
+ } {{1 {}} {2 {}} {3 {}}}
+
+ test {Module scan set listpack} {
+ r sadd ss1 a b c
+ assert_encoding listpack ss1
+ lsort [r scan.scan_key ss1]
+ } {{a {}} {b {}} {c {}}}
+
+ test "Unload the module - scan" {
+ assert_equal {OK} [r module unload scan]
+ }
+}
\ No newline at end of file
diff --git a/tests/unit/moduleapi/stream.tcl b/tests/unit/moduleapi/stream.tcl
new file mode 100644
index 0000000..7ad1a30
--- /dev/null
+++ b/tests/unit/moduleapi/stream.tcl
@@ -0,0 +1,176 @@
+set testmodule [file normalize tests/modules/stream.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Module stream add and delete} {
+ r del mystream
+ # add to empty key
+ set streamid1 [r stream.add mystream item 1 value a]
+ # add to existing stream
+ set streamid2 [r stream.add mystream item 2 value b]
+ # check result
+ assert { [string match "*-*" $streamid1] }
+ set items [r XRANGE mystream - +]
+ assert_equal $items \
+ "{$streamid1 {item 1 value a}} {$streamid2 {item 2 value b}}"
+ # delete one of them and try deleting non-existing ID
+ assert_equal OK [r stream.delete mystream $streamid1]
+ assert_error "ERR StreamDelete*" {r stream.delete mystream 123-456}
+ assert_error "Invalid stream ID*" {r stream.delete mystream foo}
+ assert_equal "{$streamid2 {item 2 value b}}" [r XRANGE mystream - +]
+ # check error condition: wrong type
+ r del mystream
+ r set mystream mystring
+ assert_error "ERR StreamAdd*" {r stream.add mystream item 1 value a}
+ assert_error "ERR StreamDelete*" {r stream.delete mystream 123-456}
+ }
+
+ test {Module stream add unblocks blocking xread} {
+ r del mystream
+
+ # Blocking XREAD on an empty key
+ set rd1 [redis_deferring_client]
+ $rd1 XREAD BLOCK 3000 STREAMS mystream $
+ # wait until client is actually blocked
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Client is not blocked"
+ }
+ set id [r stream.add mystream field 1 value a]
+ assert_equal "{mystream {{$id {field 1 value a}}}}" [$rd1 read]
+
+ # Blocking XREAD on an existing stream
+ set rd2 [redis_deferring_client]
+ $rd2 XREAD BLOCK 3000 STREAMS mystream $
+ # wait until client is actually blocked
+ wait_for_condition 50 100 {
+ [s 0 blocked_clients] eq {1}
+ } else {
+ fail "Client is not blocked"
+ }
+ set id [r stream.add mystream field 2 value b]
+ assert_equal "{mystream {{$id {field 2 value b}}}}" [$rd2 read]
+ }
+
+ test {Module stream add benchmark (1M stream add)} {
+ set n 1000000
+ r del mystream
+ set result [r stream.addn mystream $n field value]
+ assert_equal $result $n
+ }
+
+ test {Module stream XADD big fields doesn't create empty key} {
+        set original_proto [config_get_set proto-max-bulk-len 2147483647] ;# 2gb
+        set original_query [config_get_set client-query-buffer-limit 2147483647] ;# 2gb
+
+ r del mystream
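+        # hand-write the RESP header for: stream.add mystream field <value>
+        # (a 4-element array) so the 1gb bulk value can be streamed with
+        # write_big_bulk instead of being built in memory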
+ r write "*4\r\n\$10\r\nstream.add\r\n\$8\r\nmystream\r\n\$5\r\nfield\r\n"
+ catch {
+            write_big_bulk 1073741824 ;# 1gb
+ } err
+ assert {$err eq "ERR StreamAdd failed"}
+ assert_equal 0 [r exists mystream]
+
+ # restore defaults
+ r config set proto-max-bulk-len $original_proto
+ r config set client-query-buffer-limit $original_query
+ } {OK} {large-memory}
+
+ test {Module stream iterator} {
+ r del mystream
+ set streamid1 [r xadd mystream * item 1 value a]
+ set streamid2 [r xadd mystream * item 2 value b]
+ # range result
+ set result1 [r stream.range mystream "-" "+"]
+ set expect1 [r xrange mystream "-" "+"]
+ assert_equal $result1 $expect1
+ # reverse range
+ set result_rev [r stream.range mystream "+" "-"]
+ set expect_rev [r xrevrange mystream "+" "-"]
+ assert_equal $result_rev $expect_rev
+
+        # only one item: range from the beginning up to the first ID
+ set result2 [r stream.range mystream "-" $streamid1]
+ assert_equal $result2 "{$streamid1 {item 1 value a}}"
+ assert_equal $result2 [list [list $streamid1 {item 1 value a}]]
+ # only one item: range with startid = endid
+ set result3 [r stream.range mystream $streamid2 $streamid2]
+ assert_equal $result3 "{$streamid2 {item 2 value b}}"
+ assert_equal $result3 [list [list $streamid2 {item 2 value b}]]
+ }
+
+ test {Module stream iterator delete} {
+ r del mystream
+ set id1 [r xadd mystream * normal item]
+ set id2 [r xadd mystream * selfdestruct yes]
+ set id3 [r xadd mystream * another item]
+ # stream.range deletes the "selfdestruct" item after returning it
+ assert_equal \
+ "{$id1 {normal item}} {$id2 {selfdestruct yes}} {$id3 {another item}}" \
+ [r stream.range mystream - +]
+ # now, the "selfdestruct" item is gone
+ assert_equal \
+ "{$id1 {normal item}} {$id3 {another item}}" \
+ [r stream.range mystream - +]
+ }
+
+ test {Module stream trim by length} {
+ r del mystream
+ # exact maxlen
+ r xadd mystream * item 1 value a
+ r xadd mystream * item 2 value b
+ r xadd mystream * item 3 value c
+ assert_equal 3 [r xlen mystream]
+ assert_equal 0 [r stream.trim mystream maxlen = 5]
+ assert_equal 3 [r xlen mystream]
+ assert_equal 2 [r stream.trim mystream maxlen = 1]
+ assert_equal 1 [r xlen mystream]
+ assert_equal 1 [r stream.trim mystream maxlen = 0]
+ # check that there is no limit for exact maxlen
+ r stream.addn mystream 20000 item x value y
+ assert_equal 20000 [r stream.trim mystream maxlen = 0]
+ # approx maxlen (100 items per node implies default limit 10K items)
+ r stream.addn mystream 20000 item x value y
+ assert_equal 20000 [r xlen mystream]
+ assert_equal 10000 [r stream.trim mystream maxlen ~ 2]
+ assert_equal 9900 [r stream.trim mystream maxlen ~ 2]
+ assert_equal 0 [r stream.trim mystream maxlen ~ 2]
+ assert_equal 100 [r xlen mystream]
+ assert_equal 100 [r stream.trim mystream maxlen ~ 0]
+ assert_equal 0 [r xlen mystream]
+ }
+
+ test {Module stream trim by ID} {
+ r del mystream
+ # exact minid
+ r xadd mystream * item 1 value a
+ r xadd mystream * item 2 value b
+ set minid [r xadd mystream * item 3 value c]
+ assert_equal 3 [r xlen mystream]
+ assert_equal 0 [r stream.trim mystream minid = -]
+ assert_equal 3 [r xlen mystream]
+ assert_equal 2 [r stream.trim mystream minid = $minid]
+ assert_equal 1 [r xlen mystream]
+ assert_equal 1 [r stream.trim mystream minid = +]
+ # check that there is no limit for exact minid
+ r stream.addn mystream 20000 item x value y
+ assert_equal 20000 [r stream.trim mystream minid = +]
+ # approx minid (100 items per node implies default limit 10K items)
+ r stream.addn mystream 19980 item x value y
+ set minid [r xadd mystream * item x value y]
+ r stream.addn mystream 19 item x value y
+ assert_equal 20000 [r xlen mystream]
+ assert_equal 10000 [r stream.trim mystream minid ~ $minid]
+ assert_equal 9900 [r stream.trim mystream minid ~ $minid]
+ assert_equal 0 [r stream.trim mystream minid ~ $minid]
+ assert_equal 100 [r xlen mystream]
+ assert_equal 100 [r stream.trim mystream minid ~ +]
+ assert_equal 0 [r xlen mystream]
+ }
+
+ test "Unload the module - stream" {
+ assert_equal {OK} [r module unload stream]
+ }
+}
diff --git a/tests/unit/moduleapi/subcommands.tcl b/tests/unit/moduleapi/subcommands.tcl
new file mode 100644
index 0000000..62de593
--- /dev/null
+++ b/tests/unit/moduleapi/subcommands.tcl
@@ -0,0 +1,57 @@
+set testmodule [file normalize tests/modules/subcommands.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test "Module subcommands via COMMAND" {
+ # Verify that module subcommands are displayed correctly in COMMAND
+ set command_reply [r command info subcommands.bitarray]
+ set first_cmd [lindex $command_reply 0]
+ set subcmds_in_command [lsort [lindex $first_cmd 9]]
+ assert_equal [lindex $subcmds_in_command 0] {subcommands.bitarray|get -2 module 1 1 1 {} {} {{flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}} {}}
+ assert_equal [lindex $subcmds_in_command 1] {subcommands.bitarray|set -2 module 1 1 1 {} {} {{flags {RW update} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}} {}}
+
+ # Verify that module subcommands are displayed correctly in COMMAND DOCS
+ set docs_reply [r command docs subcommands.bitarray]
+ set docs [dict create {*}[lindex $docs_reply 1]]
+ set subcmds_in_cmd_docs [dict create {*}[dict get $docs subcommands]]
+ assert_equal [dict get $subcmds_in_cmd_docs "subcommands.bitarray|get"] {group module module subcommands}
+ assert_equal [dict get $subcmds_in_cmd_docs "subcommands.bitarray|set"] {group module module subcommands}
+ }
+
+ test "Module pure-container command fails on arity error" {
+ catch {r subcommands.bitarray} e
+ assert_match {*wrong number of arguments for 'subcommands.bitarray' command} $e
+
+ # Subcommands can be called
+ assert_equal [r subcommands.bitarray get k1] {OK}
+
+ # Subcommand arity error
+ catch {r subcommands.bitarray get k1 8 90} e
+ assert_match {*wrong number of arguments for 'subcommands.bitarray|get' command} $e
+ }
+
+ test "Module get current command fullname" {
+ assert_equal [r subcommands.parent_get_fullname] {subcommands.parent_get_fullname}
+ }
+
+ test "Module get current subcommand fullname" {
+ assert_equal [r subcommands.sub get_fullname] {subcommands.sub|get_fullname}
+ }
+
+ test "COMMAND LIST FILTERBY MODULE" {
+ assert_equal {} [r command list filterby module non_existing]
+
+ set commands [r command list filterby module subcommands]
+ assert_not_equal [lsearch $commands "subcommands.bitarray"] -1
+ assert_not_equal [lsearch $commands "subcommands.bitarray|set"] -1
+ assert_not_equal [lsearch $commands "subcommands.parent_get_fullname"] -1
+ assert_not_equal [lsearch $commands "subcommands.sub|get_fullname"] -1
+
+ assert_equal [lsearch $commands "set"] -1
+ }
+
+ test "Unload the module - subcommands" {
+ assert_equal {OK} [r module unload subcommands]
+ }
+}
diff --git a/tests/unit/moduleapi/test_lazyfree.tcl b/tests/unit/moduleapi/test_lazyfree.tcl
new file mode 100644
index 0000000..8d2c55a
--- /dev/null
+++ b/tests/unit/moduleapi/test_lazyfree.tcl
@@ -0,0 +1,32 @@
+set testmodule [file normalize tests/modules/test_lazyfree.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test "modules allocated memory can be reclaimed in the background" {
+ set orig_mem [s used_memory]
+ set rd [redis_deferring_client]
+
+        # LAZYFREE_THRESHOLD is 64; a module list longer than that is freed in the background by UNLINK
+ for {set i 0} {$i < 10000} {incr i} {
+ $rd lazyfreelink.insert lazykey $i
+ }
+
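+        # drain the replies of all pipelined inserts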
+ for {set j 0} {$j < 10000} {incr j} {
+ $rd read
+ }
+
+ assert {[r lazyfreelink.len lazykey] == 10000}
+
+ set peak_mem [s used_memory]
+ assert {[r unlink lazykey] == 1}
+ assert {$peak_mem > $orig_mem+10000}
+ wait_for_condition 50 100 {
+ [s used_memory] < $peak_mem &&
+ [s used_memory] < $orig_mem*2 &&
+ [string match {*lazyfreed_objects:1*} [r info Memory]]
+ } else {
+ fail "Module memory is not reclaimed by UNLINK"
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl
new file mode 100644
index 0000000..ae3036f
--- /dev/null
+++ b/tests/unit/moduleapi/testrdb.tcl
@@ -0,0 +1,306 @@
+# This module can be configured with multiple options given as flags at module load time
+# (see the example below):
+# 0 - no aux fields will be declared (this is the default)
+# 1 << 0 - use aux_save2 api
+# 1 << 1 - call aux callback before key space
+# 1 << 2 - call aux callback after key space
+# 1 << 3 - do not save data on aux callback
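+#
+# The flags are OR-ed together. For example, the test cases below load the
+# module as "testrdb.so 7" (0111: aux_save2 with callbacks before and after
+# the key space) or "testrdb.so 14" (1110: legacy aux_save, both callbacks,
+# no data saved).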
+set testmodule [file normalize tests/modules/testrdb.so]
+
+tags "modules" {
+ test {modules are able to persist types} {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ r testrdb.set.key key1 value1
+ assert_equal "value1" [r testrdb.get.key key1]
+ r debug reload
+ assert_equal "value1" [r testrdb.get.key key1]
+ }
+ }
+
+    test {module globals are lost without aux} {
+ set server_path [tmpdir "server.module-testrdb"]
+ start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path] keep_persistence true] {
+ r testrdb.set.before global1
+ assert_equal "global1" [r testrdb.get.before]
+ }
+ start_server [list overrides [list loadmodule "$testmodule" "dir" $server_path]] {
+ assert_equal "" [r testrdb.get.before]
+ }
+ }
+
+    test {aux fields that save no data are not saved to the rdb when aux_save2 is used} {
+ set server_path [tmpdir "server.module-testrdb"]
+ puts $server_path
+ # 15 == 1111 - use aux_save2 before and after key space without data
+ start_server [list overrides [list loadmodule "$testmodule 15" "dir" $server_path] keep_persistence true] {
+ r set x 1
+ r save
+ }
+ start_server [list overrides [list "dir" $server_path] keep_persistence true] {
+ # make sure server started successfully without the module.
+ assert_equal {1} [r get x]
+ }
+ }
+
+    test {aux fields that save no data are saved to the rdb when aux_save is used} {
+ set server_path [tmpdir "server.module-testrdb"]
+ puts $server_path
+ # 14 == 1110 - use aux_save before and after key space without data
+ start_server [list overrides [list loadmodule "$testmodule 14" "dir" $server_path] keep_persistence true] {
+ r set x 1
+ r save
+ }
+ start_server [list overrides [list loadmodule "$testmodule 14" "dir" $server_path] keep_persistence true] {
+            # make sure the server started successfully and aux_load was called twice.
+ assert_equal {1} [r get x]
+ assert_equal {2} [r testrdb.get.n_aux_load_called]
+ }
+ }
+
+ foreach test_case {6 7} {
+ # 6 == 0110 - use aux_save before and after key space with data
+ # 7 == 0111 - use aux_save2 before and after key space with data
+ test {modules are able to persist globals before and after} {
+ set server_path [tmpdir "server.module-testrdb"]
+ start_server [list overrides [list loadmodule "$testmodule $test_case" "dir" $server_path "save" "900 1"] keep_persistence true] {
+ r testrdb.set.before global1
+ r testrdb.set.after global2
+ assert_equal "global1" [r testrdb.get.before]
+ assert_equal "global2" [r testrdb.get.after]
+ }
+ start_server [list overrides [list loadmodule "$testmodule $test_case" "dir" $server_path "save" "900 1"]] {
+ assert_equal "global1" [r testrdb.get.before]
+ assert_equal "global2" [r testrdb.get.after]
+ }
+
+ }
+ }
+
+ foreach test_case {4 5} {
+ # 4 == 0100 - use aux_save after key space with data
+ # 5 == 0101 - use aux_save2 after key space with data
+ test {modules are able to persist globals just after} {
+ set server_path [tmpdir "server.module-testrdb"]
+ start_server [list overrides [list loadmodule "$testmodule $test_case" "dir" $server_path "save" "900 1"] keep_persistence true] {
+ r testrdb.set.after global2
+ assert_equal "global2" [r testrdb.get.after]
+ }
+ start_server [list overrides [list loadmodule "$testmodule $test_case" "dir" $server_path "save" "900 1"]] {
+ assert_equal "global2" [r testrdb.get.after]
+ }
+ }
+ }
+
+ test {Verify module options info} {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ assert_match "*\[handle-io-errors|handle-repl-async-load\]*" [r info modules]
+ }
+ }
+
+ tags {repl} {
+ test {diskless loading short read with module} {
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ start_server [list overrides [list loadmodule "$testmodule"]] {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ # Set master and replica to use diskless replication
+ $master config set repl-diskless-sync yes
+ $master config set rdbcompression no
+ $replica config set repl-diskless-load swapdb
+ $master config set hz 500
+ $replica config set hz 500
+ $master config set dynamic-hz no
+ $replica config set dynamic-hz no
+ set start [clock clicks -milliseconds]
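+                # fill the master with keys of random sizes so the rdb transfer
+                # is long enough to be interrupted at different points below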
+ for {set k 0} {$k < 30} {incr k} {
+ r testrdb.set.key key$k [string repeat A [expr {int(rand()*1000000)}]]
+ }
+
+ if {$::verbose} {
+ set end [clock clicks -milliseconds]
+ set duration [expr $end - $start]
+ puts "filling took $duration ms (TODO: use pipeline)"
+ set start [clock clicks -milliseconds]
+ }
+
+ # Start the replication process...
+ set loglines [count_log_lines -1]
+ $master config set repl-diskless-sync-delay 0
+ $replica replicaof $master_host $master_port
+
+ # kill the replication at various points
+ set attempts 100
+ if {$::accurate} { set attempts 500 }
+ for {set i 0} {$i < $attempts} {incr i} {
+ # wait for the replica to start reading the rdb
+                    # using the log file since the replica only responds to INFO once per 2mb read
+ set res [wait_for_log_messages -1 {"*Loading DB in memory*"} $loglines 2000 1]
+ set loglines [lindex $res 1]
+
+                    # add some additional random sleep so that we kill the replica connection at a different point each time
+ after [expr {int(rand()*50)}]
+
+ # kill the replica connection on the master
+ set killed [$master client kill type replica]
+
+ set res [wait_for_log_messages -1 {"*Internal error in RDB*" "*Finished with success*" "*Successful partial resynchronization*"} $loglines 500 10]
+ if {$::verbose} { puts $res }
+ set log_text [lindex $res 0]
+ set loglines [lindex $res 1]
+ if {![string match "*Internal error in RDB*" $log_text]} {
+ # force the replica to try another full sync
+ $master multi
+ $master client kill type replica
+ $master set asdf asdf
+ # fill replication backlog with new content
+ $master config set repl-backlog-size 16384
+ for {set keyid 0} {$keyid < 10} {incr keyid} {
+ $master set "$keyid string_$keyid" [string repeat A 16384]
+ }
+ $master exec
+ }
+
+ # wait for loading to stop (fail)
+                    # after a successful load, the next iteration will enter `async_loading`
+ wait_for_condition 1000 1 {
+ [s -1 async_loading] eq 0 &&
+ [s -1 loading] eq 0
+ } else {
+ fail "Replica didn't disconnect"
+ }
+ }
+ if {$::verbose} {
+ set end [clock clicks -milliseconds]
+ set duration [expr $end - $start]
+ puts "test took $duration ms"
+ }
+ # enable fast shutdown
+ $master config set rdb-key-save-delay 0
+ }
+ }
+ }
+
+ # Module events for diskless load swapdb when async_loading (matching master replid)
+ foreach test_case {6 7} {
+ # 6 == 0110 - use aux_save before and after key space with data
+ # 7 == 0111 - use aux_save2 before and after key space with data
+ foreach testType {Successful Aborted} {
+ start_server [list overrides [list loadmodule "$testmodule $test_case"] tags [list external:skip]] {
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ set replica_log [srv 0 stdout]
+ start_server [list overrides [list loadmodule "$testmodule $test_case"]] {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ set start [clock clicks -milliseconds]
+
+ # Set master and replica to use diskless replication on swapdb mode
+ $master config set repl-diskless-sync yes
+ $master config set repl-diskless-sync-delay 0
+ $master config set save ""
+ $replica config set repl-diskless-load swapdb
+ $replica config set save ""
+
+ # Initial sync to have matching replids between master and replica
+ $replica replicaof $master_host $master_port
+
+ # Let replica finish initial sync with master
+ wait_for_condition 100 100 {
+ [s -1 master_link_status] eq "up"
+ } else {
+ fail "Master <-> Replica didn't finish sync"
+ }
+
+                    # Set global values on the module so we can check that module event callbacks pick them up correctly
+ $master testrdb.set.before value1_master
+ $replica testrdb.set.before value1_replica
+
+ # Put different data sets on the master and replica
+                    # We need to put large keys on the master since the replica replies to INFO only once per 2mb read
+ $replica debug populate 200 slave 10
+ $master debug populate 1000 master 100000
+ $master config set rdbcompression no
+
+ # Force the replica to try another full sync (this time it will have matching master replid)
+ $master multi
+ $master client kill type replica
+ # Fill replication backlog with new content
+ $master config set repl-backlog-size 16384
+ for {set keyid 0} {$keyid < 10} {incr keyid} {
+ $master set "$keyid string_$keyid" [string repeat A 16384]
+ }
+ $master exec
+
+ switch $testType {
+ "Aborted" {
+ # Set master with a slow rdb generation, so that we can easily intercept loading
+ # 10ms per key, with 1000 keys is 10 seconds
+ $master config set rdb-key-save-delay 10000
+
+ test {Diskless load swapdb RedisModuleEvent_ReplAsyncLoad handling: during loading, can keep module variable same as before} {
+ # Wait for the replica to start reading the rdb and module for acknowledgement
+                                # We want to abort only after the temp db was populated by REDISMODULE_AUX_BEFORE_RDB
+ wait_for_condition 100 100 {
+ [s -1 async_loading] eq 1 && [$replica testrdb.async_loading.get.before] eq "value1_master"
+ } else {
+ fail "Module didn't receive or react to REDISMODULE_SUBEVENT_REPL_ASYNC_LOAD_STARTED"
+ }
+
+ assert_equal [$replica dbsize] 200
+ assert_equal value1_replica [$replica testrdb.get.before]
+ }
+
+                            # Make sure that the next sync will not start immediately so that we can catch the replica in between syncs
+ $master config set repl-diskless-sync-delay 5
+
+ # Kill the replica connection on the master
+ set killed [$master client kill type replica]
+
+ test {Diskless load swapdb RedisModuleEvent_ReplAsyncLoad handling: when loading aborted, can keep module variable same as before} {
+ # Wait for loading to stop (fail) and module for acknowledgement
+ wait_for_condition 100 100 {
+ [s -1 async_loading] eq 0 && [$replica testrdb.async_loading.get.before] eq ""
+ } else {
+ fail "Module didn't receive or react to REDISMODULE_SUBEVENT_REPL_ASYNC_LOAD_ABORTED"
+ }
+
+ assert_equal [$replica dbsize] 200
+ assert_equal value1_replica [$replica testrdb.get.before]
+ }
+
+ # Speed up shutdown
+ $master config set rdb-key-save-delay 0
+ }
+ "Successful" {
+ # Let replica finish sync with master
+ wait_for_condition 100 100 {
+ [s -1 master_link_status] eq "up"
+ } else {
+ fail "Master <-> Replica didn't finish sync"
+ }
+
+ test {Diskless load swapdb RedisModuleEvent_ReplAsyncLoad handling: after db loaded, can set module variable with new value} {
+ assert_equal [$replica dbsize] 1010
+ assert_equal value1_master [$replica testrdb.get.before]
+ }
+ }
+ }
+
+ if {$::verbose} {
+ set end [clock clicks -milliseconds]
+ set duration [expr $end - $start]
+ puts "test took $duration ms"
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/moduleapi/timer.tcl b/tests/unit/moduleapi/timer.tcl
new file mode 100644
index 0000000..4e9dd0f
--- /dev/null
+++ b/tests/unit/moduleapi/timer.tcl
@@ -0,0 +1,99 @@
+set testmodule [file normalize tests/modules/timer.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {RM_CreateTimer: a sequence of timers work} {
+        # We can't guarantee that all timers are created in the same millisecond, but MULTI/EXEC gets us close
+ r multi
+ for {set i 0} {$i < 20} {incr i} {
+ r test.createtimer 10 timer-incr-key
+ }
+ r exec
+
+ after 500
+ assert_equal 20 [r get timer-incr-key]
+ }
+
+ test {RM_GetTimer: basic sanity} {
+ # Getting non-existing timer
+ assert_equal {} [r test.gettimer 0]
+
+ # Getting a real timer
+ set id [r test.createtimer 10000 timer-incr-key]
+ set info [r test.gettimer $id]
+
+ assert_equal "timer-incr-key" [lindex $info 0]
+ set remaining [lindex $info 1]
+ assert {$remaining < 10000 && $remaining > 1}
+ # Stop the timer after get timer test
+ assert_equal 1 [r test.stoptimer $id]
+ }
+
+ test {RM_StopTimer: basic sanity} {
+ r set "timer-incr-key" 0
+ set id [r test.createtimer 1000 timer-incr-key]
+
+ assert_equal 1 [r test.stoptimer $id]
+
+ # Wait to be sure timer doesn't execute
+ after 2000
+ assert_equal 0 [r get timer-incr-key]
+
+ # Stop non-existing timer
+ assert_equal 0 [r test.stoptimer $id]
+ }
+
+ test {Timer appears non-existing after it fires} {
+ r set "timer-incr-key" 0
+ set id [r test.createtimer 10 timer-incr-key]
+
+ # verify timer fired
+ after 500
+ assert_equal 1 [r get timer-incr-key]
+
+ # verify id does not exist
+ assert_equal {} [r test.gettimer $id]
+ }
+
+ test "Module can be unloaded when timer was finished" {
+ r set "timer-incr-key" 0
+ r test.createtimer 500 timer-incr-key
+
+        # Make sure the timer has not fired yet
+        assert_equal 0 [r get timer-incr-key]
+        # The module cannot be unloaded while the timer is still pending
+ catch {r module unload timer} err
+ assert_match {*the module holds timer that is not fired*} $err
+
+        # Wait to be sure the timer has finished
+ wait_for_condition 10 500 {
+ [r get timer-incr-key] == 1
+ } else {
+ fail "Timer not fired"
+ }
+
+ # Timer fired, can be unloaded now.
+ assert_equal {OK} [r module unload timer]
+ }
+
+ test "Module can be unloaded when timer was stopped" {
+ r module load $testmodule
+ r set "timer-incr-key" 0
+ set id [r test.createtimer 5000 timer-incr-key]
+
+        # The module cannot be unloaded while the timer is still pending
+ catch {r module unload timer} err
+ assert_match {*the module holds timer that is not fired*} $err
+
+ # Stop the timer
+ assert_equal 1 [r test.stoptimer $id]
+
+        # Make sure the timer has not fired
+ assert_equal 0 [r get timer-incr-key]
+
+ # Timer has stopped, can be unloaded now.
+ assert_equal {OK} [r module unload timer]
+ }
+}
+
diff --git a/tests/unit/moduleapi/usercall.tcl b/tests/unit/moduleapi/usercall.tcl
new file mode 100644
index 0000000..51ee1a4
--- /dev/null
+++ b/tests/unit/moduleapi/usercall.tcl
@@ -0,0 +1,136 @@
+set testmodule [file normalize tests/modules/usercall.so]
+
+set test_script_set "#!lua
+redis.call('set','x',1)
+return 1"
+
+set test_script_get "#!lua
+redis.call('get','x')
+return 1"
+
+start_server {tags {"modules usercall"}} {
+ r module load $testmodule
+
+ # baseline test that module isn't doing anything weird
+ test {test module check regular redis command without user/acl} {
+ assert_equal [r usercall.reset_user] OK
+ assert_equal [r usercall.add_to_acl "~* &* +@all -set"] OK
+ assert_equal [r usercall.call_without_user set x 5] OK
+ assert_equal [r usercall.reset_user] OK
+ }
+
+    # call with a user that has an ACL set on it, but without testing the ACL
+ test {test module check regular redis command with user} {
+ assert_equal [r set x 5] OK
+
+ assert_equal [r usercall.reset_user] OK
+ assert_equal [r usercall.add_to_acl "~* &* +@all -set"] OK
+ # off and sanitize-payload because module user / default value
+ assert_equal [r usercall.get_acl] "off sanitize-payload ~* &* +@all -set"
+
+        # doesn't fail for regular commands since the empty flag skips the ACL check
+ assert_equal [r usercall.call_with_user_flag {} set x 10] OK
+
+ assert_equal [r get x] 10
+ assert_equal [r usercall.reset_user] OK
+ }
+
+    # call with a user that has an ACL set on it, testing the ACL in rm_call (for the command itself)
+ test {test module check regular redis command with user and acl} {
+ assert_equal [r set x 5] OK
+
+ r ACL LOG RESET
+ assert_equal [r usercall.reset_user] OK
+ assert_equal [r usercall.add_to_acl "~* &* +@all -set"] OK
+        # "off" and "sanitize-payload" are the module user's default flags
+ assert_equal [r usercall.get_acl] "off sanitize-payload ~* &* +@all -set"
+
+ # fails here as testing acl in rm call
+ assert_error {*NOPERM User module_user has no permissions*} {r usercall.call_with_user_flag C set x 10}
+
+ assert_equal [r usercall.call_with_user_flag C get x] 5
+
+ # verify that new log entry added
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] {module_user}
+ assert_equal [dict get $entry context] {module}
+ assert_equal [dict get $entry object] {set}
+ assert_equal [dict get $entry reason] {command}
+ assert_match {*cmd=usercall.call_with_user_flag*} [dict get $entry client-info]
+
+ assert_equal [r usercall.reset_user] OK
+ }
+
+    # call with a user that has an ACL set on it, testing the ACL in rm_call from a blocked background thread
+ test {test module check regular redis command with user and acl from blocked background thread} {
+ assert_equal [r set x 5] OK
+
+ r ACL LOG RESET
+ assert_equal [r usercall.reset_user] OK
+ assert_equal [r usercall.add_to_acl "~* &* +@all -set"] OK
+
+ # fails here as testing acl in rm call from a background thread
+ assert_error {*NOPERM User module_user has no permissions*} {r usercall.call_with_user_bg C set x 10}
+
+ assert_equal [r usercall.call_with_user_bg C get x] 5
+
+ # verify that new log entry added
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] {module_user}
+ assert_equal [dict get $entry context] {module}
+ assert_equal [dict get $entry object] {set}
+ assert_equal [dict get $entry reason] {command}
+ assert_match {*cmd=NULL*} [dict get $entry client-info]
+
+ assert_equal [r usercall.reset_user] OK
+ }
+
+ # baseline script test, call without user on script
+ test {test module check eval script without user} {
+ set sha_set [r script load $test_script_set]
+ set sha_get [r script load $test_script_get]
+
+ assert_equal [r usercall.call_without_user evalsha $sha_set 0] 1
+ assert_equal [r usercall.call_without_user evalsha $sha_get 0] 1
+ }
+
+    # script test, call with a user set on the script but without ACL testing
+ test {test module check eval script with user being set, but not acl testing} {
+ set sha_set [r script load $test_script_set]
+ set sha_get [r script load $test_script_get]
+
+ assert_equal [r usercall.reset_user] OK
+ assert_equal [r usercall.add_to_acl "~* &* +@all -set"] OK
+        # "off" and "sanitize-payload" are the module user's default flags
+ assert_equal [r usercall.get_acl] "off sanitize-payload ~* &* +@all -set"
+
+ # passes as not checking ACL
+ assert_equal [r usercall.call_with_user_flag {} evalsha $sha_set 0] 1
+ assert_equal [r usercall.call_with_user_flag {} evalsha $sha_get 0] 1
+ }
+
+ # call with user on script (without rm_call acl check) to ensure user carries through to script execution
+ # we already tested the check in rm_call above, here we are checking the script itself will enforce ACL
+ test {test module check eval script with user and acl} {
+ set sha_set [r script load $test_script_set]
+ set sha_get [r script load $test_script_get]
+
+ r ACL LOG RESET
+ assert_equal [r usercall.reset_user] OK
+ assert_equal [r usercall.add_to_acl "~* &* +@all -set"] OK
+
+        # fails inside the script, since rm_call itself permits the eval call
+ catch {r usercall.call_with_user_flag C evalsha $sha_set 0} e
+ assert_match {*ERR ACL failure in script*} $e
+
+ assert_equal [r usercall.call_with_user_flag C evalsha $sha_get 0] 1
+
+ # verify that new log entry added
+ set entry [lindex [r ACL LOG] 0]
+ assert_equal [dict get $entry username] {module_user}
+ assert_equal [dict get $entry context] {lua}
+ assert_equal [dict get $entry object] {set}
+ assert_equal [dict get $entry reason] {command}
+ assert_match {*cmd=usercall.call_with_user_flag*} [dict get $entry client-info]
+ }
+}
diff --git a/tests/unit/moduleapi/zset.tcl b/tests/unit/moduleapi/zset.tcl
new file mode 100644
index 0000000..b6ab41d
--- /dev/null
+++ b/tests/unit/moduleapi/zset.tcl
@@ -0,0 +1,40 @@
+set testmodule [file normalize tests/modules/zset.so]
+
+start_server {tags {"modules"}} {
+ r module load $testmodule
+
+ test {Module zset rem} {
+ r del k
+ r zadd k 100 hello 200 world
+ assert_equal 1 [r zset.rem k hello]
+ assert_equal 0 [r zset.rem k hello]
+ assert_equal 1 [r exists k]
+ # Check that removing the last element deletes the key
+ assert_equal 1 [r zset.rem k world]
+ assert_equal 0 [r exists k]
+ }
+
+ test {Module zset add} {
+ r del k
+ # Check that failure does not create empty key
+ assert_error "ERR ZsetAdd failed" {r zset.add k nan hello}
+ assert_equal 0 [r exists k]
+
+ r zset.add k 100 hello
+ assert_equal {hello 100} [r zrange k 0 -1 withscores]
+ }
+
+ test {Module zset incrby} {
+ r del k
+ # Check that failure does not create empty key
+ assert_error "ERR ZsetIncrby failed" {r zset.incrby k hello nan}
+ assert_equal 0 [r exists k]
+
+ r zset.incrby k hello 100
+ assert_equal {hello 100} [r zrange k 0 -1 withscores]
+ }
+
+ test "Unload the module - zset" {
+ assert_equal {OK} [r module unload zset]
+ }
+}
diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl
new file mode 100644
index 0000000..851e022
--- /dev/null
+++ b/tests/unit/multi.tcl
@@ -0,0 +1,923 @@
+proc wait_for_dbsize {size} {
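+    # poll from a separate client so the caller's connection state
+    # (MULTI/WATCH) is left untouched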
+ set r2 [redis_client]
+ wait_for_condition 50 100 {
+ [$r2 dbsize] == $size
+ } else {
+ fail "Target dbsize not reached"
+ }
+ $r2 close
+}
+
+start_server {tags {"multi"}} {
+ test {MULTI / EXEC basics} {
+ r del mylist
+ r rpush mylist a
+ r rpush mylist b
+ r rpush mylist c
+ r multi
+ set v1 [r lrange mylist 0 -1]
+ set v2 [r ping]
+ set v3 [r exec]
+ list $v1 $v2 $v3
+ } {QUEUED QUEUED {{a b c} PONG}}
+
+ test {DISCARD} {
+ r del mylist
+ r rpush mylist a
+ r rpush mylist b
+ r rpush mylist c
+ r multi
+ set v1 [r del mylist]
+ set v2 [r discard]
+ set v3 [r lrange mylist 0 -1]
+ list $v1 $v2 $v3
+ } {QUEUED OK {a b c}}
+
+ test {Nested MULTI are not allowed} {
+ set err {}
+ r multi
+ catch {[r multi]} err
+ r exec
+ set _ $err
+ } {*ERR MULTI*}
+
+ test {MULTI where commands alter argc/argv} {
+ r sadd myset a
+ r multi
+ r spop myset
+ list [r exec] [r exists myset]
+ } {a 0}
+
+ test {WATCH inside MULTI is not allowed} {
+ set err {}
+ r multi
+ catch {[r watch x]} err
+ r exec
+ set _ $err
+ } {*ERR WATCH*}
+
+ test {EXEC fails if there are errors while queueing commands #1} {
+ r del foo1{t} foo2{t}
+ r multi
+ r set foo1{t} bar1
+ catch {r non-existing-command}
+ r set foo2{t} bar2
+ catch {r exec} e
+ assert_match {EXECABORT*} $e
+ list [r exists foo1{t}] [r exists foo2{t}]
+ } {0 0}
+
+ test {EXEC fails if there are errors while queueing commands #2} {
+ set rd [redis_deferring_client]
+ r del foo1{t} foo2{t}
+ r multi
+ r set foo1{t} bar1
+ $rd config set maxmemory 1
+ assert {[$rd read] eq {OK}}
+ catch {r lpush mylist{t} myvalue}
+ $rd config set maxmemory 0
+ assert {[$rd read] eq {OK}}
+ r set foo2{t} bar2
+ catch {r exec} e
+ assert_match {EXECABORT*} $e
+ $rd close
+ list [r exists foo1{t}] [r exists foo2{t}]
+ } {0 0} {needs:config-maxmemory}
+
+ test {If EXEC aborts, the client MULTI state is cleared} {
+ r del foo1{t} foo2{t}
+ r multi
+ r set foo1{t} bar1
+ catch {r non-existing-command}
+ r set foo2{t} bar2
+ catch {r exec} e
+ assert_match {EXECABORT*} $e
+ r ping
+ } {PONG}
+
+ test {EXEC works on WATCHed key not modified} {
+ r watch x{t} y{t} z{t}
+ r watch k{t}
+ r multi
+ r ping
+ r exec
+ } {PONG}
+
+ test {EXEC fail on WATCHed key modified (1 key of 1 watched)} {
+ r set x 30
+ r watch x
+ r set x 40
+ r multi
+ r ping
+ r exec
+ } {}
+
+ test {EXEC fail on WATCHed key modified (1 key of 5 watched)} {
+ r set x{t} 30
+ r watch a{t} b{t} x{t} k{t} z{t}
+ r set x{t} 40
+ r multi
+ r ping
+ r exec
+ } {}
+
+ test {EXEC fail on WATCHed key modified by SORT with STORE even if the result is empty} {
+ r flushdb
+ r lpush foo bar
+ r watch foo
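+        # sorting a missing list with STORE deletes the destination key, so the
+        # WATCHed key is touched even though the stored result is empty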
+ r sort emptylist store foo
+ r multi
+ r ping
+ r exec
+ } {} {cluster:skip}
+
+ test {EXEC fail on lazy expired WATCHed key} {
+ r del key
+ r debug set-active-expire 0
+
+ for {set j 0} {$j < 10} {incr j} {
+ r set key 1 px 100
+ r watch key
+ after 101
+ r multi
+ r incr key
+
+ set res [r exec]
+ if {$res eq {}} break
+ }
+ if {$::verbose} { puts "EXEC fail on lazy expired WATCHed key attempts: $j" }
+
+ r debug set-active-expire 1
+ set _ $res
+ } {} {needs:debug}
+
+ test {WATCH stale keys should not fail EXEC} {
+ r del x
+ r debug set-active-expire 0
+ r set x foo px 1
+ after 2
+ r watch x
+ r multi
+ r ping
+ assert_equal {PONG} [r exec]
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ test {Delete WATCHed stale keys should not fail EXEC} {
+ r del x
+ r debug set-active-expire 0
+ r set x foo px 1
+ after 2
+ r watch x
+ # EXISTS triggers lazy expiry/deletion
+ assert_equal 0 [r exists x]
+ r multi
+ r ping
+ assert_equal {PONG} [r exec]
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ test {FLUSHDB while watching stale keys should not fail EXEC} {
+ r del x
+ r debug set-active-expire 0
+ r set x foo px 1
+ after 2
+ r watch x
+ r flushdb
+ r multi
+ r ping
+ assert_equal {PONG} [r exec]
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ test {After successful EXEC key is no longer watched} {
+ r set x 30
+ r watch x
+ r multi
+ r ping
+ r exec
+ r set x 40
+ r multi
+ r ping
+ r exec
+ } {PONG}
+
+ test {After failed EXEC key is no longer watched} {
+ r set x 30
+ r watch x
+ r set x 40
+ r multi
+ r ping
+ r exec
+ r set x 40
+ r multi
+ r ping
+ r exec
+ } {PONG}
+
+ test {It is possible to UNWATCH} {
+ r set x 30
+ r watch x
+ r set x 40
+ r unwatch
+ r multi
+ r ping
+ r exec
+ } {PONG}
+
+ test {UNWATCH when there is nothing watched works as expected} {
+ r unwatch
+ } {OK}
+
+ test {FLUSHALL is able to touch the watched keys} {
+ r set x 30
+ r watch x
+ r flushall
+ r multi
+ r ping
+ r exec
+ } {}
+
+ test {FLUSHALL does not touch non affected keys} {
+ r del x
+ r watch x
+ r flushall
+ r multi
+ r ping
+ r exec
+ } {PONG}
+
+ test {FLUSHDB is able to touch the watched keys} {
+ r set x 30
+ r watch x
+ r flushdb
+ r multi
+ r ping
+ r exec
+ } {}
+
+ test {FLUSHDB does not touch non affected keys} {
+ r del x
+ r watch x
+ r flushdb
+ r multi
+ r ping
+ r exec
+ } {PONG}
+
+ test {SWAPDB is able to touch the watched keys that exist} {
+ r flushall
+ r select 0
+ r set x 30
+ r watch x ;# make sure x (set to 30) doesn't change (SWAPDB will "delete" it)
+ r swapdb 0 1
+ r multi
+ r ping
+ r exec
+ } {} {singledb:skip}
+
+ test {SWAPDB is able to touch the watched keys that do not exist} {
+ r flushall
+ r select 1
+ r set x 30
+ r select 0
+ r watch x ;# make sure the key x (currently missing) doesn't change (SWAPDB will create it)
+ r swapdb 0 1
+ r multi
+ r ping
+ r exec
+ } {} {singledb:skip}
+
+ test {SWAPDB does not touch watched stale keys} {
+ r flushall
+ r select 1
+ r debug set-active-expire 0
+ r set x foo px 1
+ after 2
+ r watch x
+ r swapdb 0 1 ; # expired key replaced with no key => no change
+ r multi
+ r ping
+ assert_equal {PONG} [r exec]
+ r debug set-active-expire 1
+ } {OK} {singledb:skip needs:debug}
+
+ test {SWAPDB does not touch non-existing key replaced with stale key} {
+ r flushall
+ r select 0
+ r debug set-active-expire 0
+ r set x foo px 1
+ after 2
+ r select 1
+ r watch x
+ r swapdb 0 1 ; # no key replaced with expired key => no change
+ r multi
+ r ping
+ assert_equal {PONG} [r exec]
+ r debug set-active-expire 1
+ } {OK} {singledb:skip needs:debug}
+
+ test {SWAPDB does not touch stale key replaced with another stale key} {
+ r flushall
+ r debug set-active-expire 0
+ r select 1
+ r set x foo px 1
+ r select 0
+ r set x bar px 1
+ after 2
+ r select 1
+ r watch x
+        r swapdb 0 1 ; # expired key replaced with another expired key => no change
+ r multi
+ r ping
+ assert_equal {PONG} [r exec]
+ r debug set-active-expire 1
+ } {OK} {singledb:skip needs:debug}
+
+ test {WATCH is able to remember the DB a key belongs to} {
+ r select 5
+ r set x 30
+ r watch x
+ r select 1
+ r set x 10
+ r select 5
+ r multi
+ r ping
+ set res [r exec]
+ # Restore original DB
+ r select 9
+ set res
+ } {PONG} {singledb:skip}
+
+ test {WATCH will consider touched keys target of EXPIRE} {
+ r del x
+ r set x foo
+ r watch x
+ r expire x 10
+ r multi
+ r ping
+ r exec
+ } {}
+
+ test {WATCH will consider touched expired keys} {
+ r flushall
+ r del x
+ r set x foo
+ r expire x 1
+ r watch x
+
+ # Wait for the keys to expire.
+ wait_for_dbsize 0
+
+ r multi
+ r ping
+ r exec
+ } {}
+
+ test {DISCARD should clear the WATCH dirty flag on the client} {
+ r watch x
+ r set x 10
+ r multi
+ r discard
+ r multi
+ r incr x
+ r exec
+ } {11}
+
+ test {DISCARD should UNWATCH all the keys} {
+ r watch x
+ r set x 10
+ r multi
+ r discard
+ r set x 10
+ r multi
+ r incr x
+ r exec
+ } {11}
+
+ test {MULTI / EXEC is not propagated (single write command)} {
+ set repl [attach_to_replication_stream]
+ r multi
+ r set foo bar
+ r exec
+ r set foo2 bar
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ {set foo2 bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI / EXEC is propagated correctly (multiple commands)} {
+ set repl [attach_to_replication_stream]
+ r multi
+ r set foo{t} bar
+ r get foo{t}
+ r set foo2{t} bar2
+ r get foo2{t}
+ r set foo3{t} bar3
+ r get foo3{t}
+ r exec
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {set foo{t} bar}
+ {set foo2{t} bar2}
+ {set foo3{t} bar3}
+ {exec}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI / EXEC is propagated correctly (multiple commands with SELECT)} {
+ set repl [attach_to_replication_stream]
+ r multi
+ r select 1
+ r set foo{t} bar
+ r get foo{t}
+ r select 2
+ r set foo2{t} bar2
+ r get foo2{t}
+ r select 3
+ r set foo3{t} bar3
+ r get foo3{t}
+ r exec
+
+ assert_replication_stream $repl {
+ {multi}
+ {select *}
+ {set foo{t} bar}
+ {select *}
+ {set foo2{t} bar2}
+ {select *}
+ {set foo3{t} bar3}
+ {exec}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl singledb:skip}
+
+ test {MULTI / EXEC is propagated correctly (empty transaction)} {
+ set repl [attach_to_replication_stream]
+ r multi
+ r exec
+ r set foo bar
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI / EXEC is propagated correctly (read-only commands)} {
+ r set foo value1
+ set repl [attach_to_replication_stream]
+ r multi
+ r get foo
+ r exec
+ r set foo value2
+ assert_replication_stream $repl {
+ {select *}
+ {set foo value2}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI / EXEC is propagated correctly (write command, no effect)} {
+ r del bar
+ r del foo
+ set repl [attach_to_replication_stream]
+ r multi
+ r del foo
+ r exec
+
+ # add another command so that when we see it we know multi-exec wasn't
+ # propagated
+ r incr foo
+
+ assert_replication_stream $repl {
+ {select *}
+ {incr foo}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI / EXEC with REPLICAOF} {
+ # This test verifies that if we demote a master to replica inside a transaction, the
+ # entire transaction is not propagated to the already-connected replica
+ set repl [attach_to_replication_stream]
+ r set foo bar
+ r multi
+ r set foo2 bar
+ r replicaof localhost 9999
+ r set foo3 bar
+ r exec
+ catch {r set foo4 bar} e
+ assert_match {READONLY*} $e
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ }
+ r replicaof no one
+ } {OK} {needs:repl cluster:skip}
+
+ test {DISCARD should not fail during OOM} {
+ set rd [redis_deferring_client]
+ $rd config set maxmemory 1
+ assert {[$rd read] eq {OK}}
+ r multi
+ catch {r set x 1} e
+ assert_match {OOM*} $e
+ r discard
+ $rd config set maxmemory 0
+ assert {[$rd read] eq {OK}}
+ $rd close
+ r ping
+ } {PONG} {needs:config-maxmemory}
+
+ test {MULTI and script timeout} {
+ # check that if MULTI arrives during timeout, it is either refused, or
+ # allowed to pass, and we don't end up executing half of the transaction
+ set rd1 [redis_deferring_client]
+ set r2 [redis_client]
+ r config set lua-time-limit 10
+ r set xx 1
+ $rd1 eval {while true do end} 0
+ after 200
+ catch { $r2 multi; } e
+ catch { $r2 incr xx; } e
+ r script kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ catch { $r2 incr xx; } e
+ catch { $r2 exec; } e
+ assert_match {EXECABORT*previous errors*} $e
+ set xx [r get xx]
+        # make sure that either the whole transaction passed or none of it (we actually expect none)
+ assert { $xx == 1 || $xx == 3}
+ # check that the connection is no longer in multi state
+ set pong [$r2 ping asdf]
+ assert_equal $pong "asdf"
+ $rd1 close; $r2 close
+ }
+
+ test {EXEC and script timeout} {
+ # check that if EXEC arrives during timeout, we don't end up executing
+ # half of the transaction, and also that we exit the multi state
+ set rd1 [redis_deferring_client]
+ set r2 [redis_client]
+ r config set lua-time-limit 10
+ r set xx 1
+ catch { $r2 multi; } e
+ catch { $r2 incr xx; } e
+ $rd1 eval {while true do end} 0
+ after 200
+ catch { $r2 incr xx; } e
+ catch { $r2 exec; } e
+ assert_match {EXECABORT*BUSY*} $e
+ r script kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ set xx [r get xx]
+        # make sure that either the whole transaction passed or none of it (we actually expect none)
+ assert { $xx == 1 || $xx == 3}
+ # check that the connection is no longer in multi state
+ set pong [$r2 ping asdf]
+ assert_equal $pong "asdf"
+ $rd1 close; $r2 close
+ }
+
+ test {MULTI-EXEC body and script timeout} {
+ # check that we don't run an incomplete transaction due to some commands
+ # arriving during busy script
+ set rd1 [redis_deferring_client]
+ set r2 [redis_client]
+ r config set lua-time-limit 10
+ r set xx 1
+ catch { $r2 multi; } e
+ catch { $r2 incr xx; } e
+ $rd1 eval {while true do end} 0
+ after 200
+ catch { $r2 incr xx; } e
+ r script kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ catch { $r2 exec; } e
+ assert_match {EXECABORT*previous errors*} $e
+ set xx [r get xx]
+        # make sure that either the whole transaction passed or none of it (we actually expect none)
+ assert { $xx == 1 || $xx == 3}
+ # check that the connection is no longer in multi state
+ set pong [$r2 ping asdf]
+ assert_equal $pong "asdf"
+ $rd1 close; $r2 close
+ }
+
+ test {just EXEC and script timeout} {
+ # check that if EXEC arrives during timeout, we don't end up executing
+ # actual commands during busy script, and also that we exit the multi state
+ set rd1 [redis_deferring_client]
+ set r2 [redis_client]
+ r config set lua-time-limit 10
+ r set xx 1
+ catch { $r2 multi; } e
+ catch { $r2 incr xx; } e
+ $rd1 eval {while true do end} 0
+ after 200
+ catch { $r2 exec; } e
+ assert_match {EXECABORT*BUSY*} $e
+ r script kill
+ after 200 ; # Give some time to Lua to call the hook again...
+ set xx [r get xx]
+        # make sure we didn't execute the transaction
+ assert { $xx == 1}
+ # check that the connection is no longer in multi state
+ set pong [$r2 ping asdf]
+ assert_equal $pong "asdf"
+ $rd1 close; $r2 close
+ }
+
+ test {exec with write commands and state change} {
+        # check that an EXEC containing write commands fails if the server state changed since they were queued
+ set r1 [redis_client]
+ r set xx 1
+ r multi
+ r incr xx
+ $r1 config set min-replicas-to-write 2
+ catch {r exec} e
+ assert_match {*EXECABORT*NOREPLICAS*} $e
+ set xx [r get xx]
+ # make sure that the INCR wasn't executed
+ assert { $xx == 1}
+ $r1 config set min-replicas-to-write 0
+ $r1 close
+ } {0} {needs:repl}
+
+ test {exec with read commands and stale replica state change} {
+        # check that an EXEC containing read commands fails if the server state changed since they were queued
+ r config set replica-serve-stale-data no
+ set r1 [redis_client]
+ r set xx 1
+
+ # check that GET and PING are disallowed on stale replica, even if the replica becomes stale only after queuing.
+ r multi
+ r get xx
+        $r1 replicaof localhost 0
+ catch {r exec} e
+ assert_match {*EXECABORT*MASTERDOWN*} $e
+
+ # reset
+ $r1 replicaof no one
+
+ r multi
+ r ping
+        $r1 replicaof localhost 0
+ catch {r exec} e
+ assert_match {*EXECABORT*MASTERDOWN*} $e
+
+ # check that when replica is not stale, GET is allowed
+ # while we're at it, let's check that multi is allowed on stale replica too
+ r multi
+ $r1 replicaof no one
+ r get xx
+ set xx [r exec]
+        # make sure that the GET was executed
+ assert { $xx == 1 }
+ $r1 close
+ } {0} {needs:repl cluster:skip}
+
+ test {EXEC with only read commands should not be rejected when OOM} {
+ set r2 [redis_client]
+
+ r set x value
+ r multi
+ r get x
+ r ping
+
+ # enforcing OOM
+ $r2 config set maxmemory 1
+
+ # finish the multi transaction with exec
+ assert { [r exec] == {value PONG} }
+
+ # releasing OOM
+ $r2 config set maxmemory 0
+ $r2 close
+ } {0} {needs:config-maxmemory}
+
+ test {EXEC with at least one use-memory command should fail} {
+ set r2 [redis_client]
+
+ r multi
+ r set x 1
+ r get x
+
+ # enforcing OOM
+ $r2 config set maxmemory 1
+
+ # finish the multi transaction with exec
+ catch {r exec} e
+ assert_match {EXECABORT*OOM*} $e
+
+ # releasing OOM
+ $r2 config set maxmemory 0
+ $r2 close
+ } {0} {needs:config-maxmemory}
+
+    test {Blocking commands ignore the timeout} {
+ r xgroup create s{t} g $ MKSTREAM
+
+ set m [r multi]
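+        # inside MULTI, blocking commands execute as their non-blocking variants
+        # and return a null reply immediately instead of blocking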
+ r blpop empty_list{t} 0
+ r brpop empty_list{t} 0
+ r brpoplpush empty_list1{t} empty_list2{t} 0
+ r blmove empty_list1{t} empty_list2{t} LEFT LEFT 0
+ r bzpopmin empty_zset{t} 0
+ r bzpopmax empty_zset{t} 0
+ r xread BLOCK 0 STREAMS s{t} $
+ r xreadgroup group g c BLOCK 0 STREAMS s{t} >
+ set res [r exec]
+
+ list $m $res
+ } {OK {{} {} {} {} {} {} {} {}}}
+
+ test {MULTI propagation of PUBLISH} {
+ set repl [attach_to_replication_stream]
+
+ r multi
+ r publish bla bla
+ r exec
+
+ assert_replication_stream $repl {
+ {select *}
+ {publish bla bla}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl cluster:skip}
+
+ test {MULTI propagation of SCRIPT LOAD} {
+ set repl [attach_to_replication_stream]
+
+ # make sure that SCRIPT LOAD inside MULTI isn't propagated
+ r multi
+ r script load {redis.call('set', KEYS[1], 'foo')}
+ r set foo bar
+ set res [r exec]
+ set sha [lindex $res 0]
+
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI propagation of EVAL} {
+ set repl [attach_to_replication_stream]
+
+        # make sure that EVAL inside MULTI is propagated as a transaction of its effects
+ r multi
+ r eval {redis.call('set', KEYS[1], 'bar')} 1 bar
+ r exec
+
+ assert_replication_stream $repl {
+ {select *}
+ {set bar bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MULTI propagation of SCRIPT FLUSH} {
+ set repl [attach_to_replication_stream]
+
+ # make sure that SCRIPT FLUSH isn't propagated
+ r multi
+ r script flush
+ r set foo bar
+ r exec
+
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ tags {"stream"} {
+ test {MULTI propagation of XREADGROUP} {
+ set repl [attach_to_replication_stream]
+
+ r XADD mystream * foo bar
+ r XADD mystream * foo2 bar2
+ r XADD mystream * foo3 bar3
+ r XGROUP CREATE mystream mygroup 0
+
+            # make sure the XCLAIM (propagated by XREADGROUP) is indeed inside MULTI/EXEC
+ r multi
+ r XREADGROUP GROUP mygroup consumer1 COUNT 2 STREAMS mystream ">"
+ r XREADGROUP GROUP mygroup consumer1 STREAMS mystream ">"
+ r exec
+
+ assert_replication_stream $repl {
+ {select *}
+ {xadd *}
+ {xadd *}
+ {xadd *}
+ {xgroup CREATE *}
+ {multi}
+ {xclaim *}
+ {xclaim *}
+ {xclaim *}
+ {exec}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+ }
+
+ foreach {cmd} {SAVE SHUTDOWN} {
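+        # both commands carry the no-multi flag, so queueing them fails and the
+        # EXEC aborts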
+ test "MULTI with $cmd" {
+ r del foo
+ r multi
+ r set foo bar
+ catch {r $cmd} e1
+ catch {r exec} e2
+ assert_match {*Command not allowed inside a transaction*} $e1
+ assert_match {EXECABORT*} $e2
+ r get foo
+ } {}
+ }
+
+ test "MULTI with BGREWRITEAOF" {
+ set forks [s total_forks]
+ r multi
+ r set foo bar
+ r BGREWRITEAOF
+ set res [r exec]
+ assert_match "*rewriting scheduled*" [lindex $res 1]
+ wait_for_condition 50 100 {
+ [s total_forks] > $forks
+ } else {
+ fail "aofrw didn't start"
+ }
+ waitForBgrewriteaof r
+ } {} {external:skip}
+
+ test "MULTI with config set appendonly" {
+ set lines [count_log_lines 0]
+ set forks [s total_forks]
+ r multi
+ r set foo bar
+ r config set appendonly yes
+ r exec
+ verify_log_message 0 "*AOF background was scheduled*" $lines
+ wait_for_condition 50 100 {
+ [s total_forks] > $forks
+ } else {
+ fail "aofrw didn't start"
+ }
+ waitForBgrewriteaof r
+ } {} {external:skip}
+
+ test "MULTI with config error" {
+ r multi
+ r set foo bar
+ r config set maxmemory bla
+
+        # letting the redis parser read it would throw an exception instead of
+        # replying with an array that contains an error, so we switch to reading
+        # raw RESP instead
+ r readraw 1
+
+ set res [r exec]
+ assert_equal $res "*2"
+ set res [r read]
+ assert_equal $res "+OK"
+ set res [r read]
+ r readraw 0
+ set _ $res
+ } {*CONFIG SET failed*}
+
+ test "Flushall while watching several keys by one client" {
+ r flushall
+ r mset a{t} a b{t} b
+ r watch b{t} a{t}
+ r flushall
+ r ping
+ }
+}
+
+start_server {overrides {appendonly {yes} appendfilename {appendonly.aof} appendfsync always} tags {external:skip}} {
+ test {MULTI with FLUSHALL and AOF} {
+ set aof [get_last_incr_aof_path r]
+ r multi
+ r set foo bar
+ r flushall
+ r exec
+ assert_aof_content $aof {
+ {multi}
+ {select *}
+ {set *}
+ {flushall}
+ {exec}
+ }
+ r get foo
+ } {}
+}
diff --git a/tests/unit/networking.tcl b/tests/unit/networking.tcl
new file mode 100644
index 0000000..79d6e39
--- /dev/null
+++ b/tests/unit/networking.tcl
@@ -0,0 +1,172 @@
+source tests/support/cli.tcl
+
+test {CONFIG SET port number} {
+ start_server {} {
+ if {$::tls} { set port_cfg tls-port} else { set port_cfg port }
+
+ # available port
+ set avail_port [find_available_port $::baseport $::portcount]
+ set rd [redis [srv 0 host] [srv 0 port] 0 $::tls]
+ $rd CONFIG SET $port_cfg $avail_port
+ $rd close
+ set rd [redis [srv 0 host] $avail_port 0 $::tls]
+ $rd PING
+
+        # already in-use port
+ catch {$rd CONFIG SET $port_cfg $::test_server_port} e
+ assert_match {*Unable to listen on this port*} $e
+ $rd close
+
+ # make sure server still listening on the previous port
+ set rd [redis [srv 0 host] $avail_port 0 $::tls]
+ $rd PING
+ $rd close
+ }
+} {} {external:skip}
+
+test {CONFIG SET bind address} {
+ start_server {} {
+        # invalid address
+ catch {r CONFIG SET bind "999.999.999.999"} e
+ assert_match {*Failed to bind to specified addresses*} $e
+
+ # make sure server still bound to the previous address
+ set rd [redis [srv 0 host] [srv 0 port] 0 $::tls]
+ $rd PING
+ $rd close
+ }
+} {} {external:skip}
+
+# Attempt to connect to host using a client bound to bindaddr,
+# and return a non-zero value if successful within the specified
+# millisecond timeout, or zero otherwise.
+proc test_loopback {host bindaddr timeout} {
+ if {[exec uname] != {Linux}} {
+ return 0
+ }
+
+ after $timeout set ::test_loopback_state timeout
+ if {[catch {
+ set server_sock [socket -server accept 0]
+ set port [lindex [fconfigure $server_sock -sockname] 2] } err]} {
+ return 0
+ }
+
+ proc accept {channel clientaddr clientport} {
+ set ::test_loopback_state "connected"
+ close $channel
+ }
+
+ if {[catch {set client_sock [socket -async -myaddr $bindaddr $host $port]} err]} {
+ puts "test_loopback: Client connect failed: $err"
+ } else {
+ close $client_sock
+ }
+
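+    # vwait runs the Tcl event loop until either the accept callback or the
+    # timeout timer set above writes ::test_loopback_state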
+ vwait ::test_loopback_state
+ close $server_sock
+
+ return [expr {$::test_loopback_state == {connected}}]
+}
+
+test {CONFIG SET bind-source-addr} {
+ if {[test_loopback 127.0.0.1 127.0.0.2 1000]} {
+ start_server {} {
+ start_server {} {
+ set replica [srv 0 client]
+ set master [srv -1 client]
+
+ $master config set protected-mode no
+
+ $replica config set bind-source-addr 127.0.0.2
+ $replica replicaof [srv -1 host] [srv -1 port]
+
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ assert_match {*ip=127.0.0.2*} [s -1 slave0]
+ }
+ }
+ } else {
+ if {$::verbose} { puts "Skipping bind-source-addr test." }
+ }
+} {} {external:skip}
+
+start_server {config "minimal.conf" tags {"external:skip"}} {
+ test {Default bind address configuration handling} {
+ # Default is explicit and sane
+ assert_equal "* -::*" [lindex [r CONFIG GET bind] 1]
+
+ # CONFIG REWRITE acknowledges this as a default
+ r CONFIG REWRITE
+ assert_equal 0 [count_message_lines [srv 0 config_file] bind]
+
+ # Removing the bind address works
+ r CONFIG SET bind ""
+ assert_equal "" [lindex [r CONFIG GET bind] 1]
+
+ # No additional clients can connect
+ catch {redis_client} err
+ assert_match {*connection refused*} $err
+
+ # CONFIG REWRITE handles empty bindaddr
+ r CONFIG REWRITE
+ assert_equal 1 [count_message_lines [srv 0 config_file] bind]
+
+ # Make sure we're able to restart
+ restart_server 0 0 0 0
+
+ # Make sure bind parameter is as expected and server handles binding
+ # accordingly.
+ # (it seems that rediscli_exec behaves differently in RESP3, possibly
+ # because CONFIG GET returns a dict instead of a list so redis-cli emits
+ # it in a single line)
+ if {$::force_resp3} {
+ assert_equal {{bind }} [rediscli_exec 0 config get bind]
+ } else {
+ assert_equal {bind {}} [rediscli_exec 0 config get bind]
+ }
+ catch {reconnect 0} err
+ assert_match {*connection refused*} $err
+
+ assert_equal {OK} [rediscli_exec 0 config set bind *]
+ reconnect 0
+ r ping
+ } {PONG}
+
+ test {Protected mode works as expected} {
+ # Get a non-loopback address of this instance for this test.
+ set myaddr [get_nonloopback_addr]
+ if {$myaddr != "" && ![string match {127.*} $myaddr]} {
+ # Non-loopback client should fail by default
+ set r2 [get_nonloopback_client]
+ catch {$r2 ping} err
+ assert_match {*DENIED*} $err
+
+ # Bind configuration should not matter
+ assert_equal {OK} [r config set bind "*"]
+ set r2 [get_nonloopback_client]
+ catch {$r2 ping} err
+ assert_match {*DENIED*} $err
+
+ # Setting a password should disable protected mode
+ assert_equal {OK} [r config set requirepass "secret"]
+ set r2 [redis $myaddr [srv 0 "port"] 0 $::tls]
+ assert_equal {OK} [$r2 auth secret]
+ assert_equal {PONG} [$r2 ping]
+
+ # Clearing the password re-enables protected mode
+ assert_equal {OK} [r config set requirepass ""]
+ set r2 [redis $myaddr [srv 0 "port"] 0 $::tls]
+ assert_match {*DENIED*} $err
+
+ # Explicitly disabling protected-mode works
+ assert_equal {OK} [r config set protected-mode no]
+ set r2 [redis $myaddr [srv 0 "port"] 0 $::tls]
+ assert_equal {PONG} [$r2 ping]
+ }
+ }
+}
diff --git a/tests/unit/obuf-limits.tcl b/tests/unit/obuf-limits.tcl
new file mode 100644
index 0000000..45efc26
--- /dev/null
+++ b/tests/unit/obuf-limits.tcl
@@ -0,0 +1,230 @@
+start_server {tags {"obuf-limits external:skip logreqres:skip"}} {
+ test {CONFIG SET client-output-buffer-limit} {
+ set oldval [lindex [r config get client-output-buffer-limit] 1]
+
+ catch {r config set client-output-buffer-limit "wrong number"} e
+ assert_match {*Wrong*arguments*} $e
+
+ catch {r config set client-output-buffer-limit "invalid_class 10mb 10mb 60"} e
+ assert_match {*Invalid*client*class*} $e
+ catch {r config set client-output-buffer-limit "master 10mb 10mb 60"} e
+ assert_match {*Invalid*client*class*} $e
+
+ catch {r config set client-output-buffer-limit "normal 10mbs 10mb 60"} e
+ assert_match {*Error*hard*} $e
+
+ catch {r config set client-output-buffer-limit "replica 10mb 10mbs 60"} e
+ assert_match {*Error*soft*} $e
+
+ catch {r config set client-output-buffer-limit "pubsub 10mb 10mb 60s"} e
+ assert_match {*Error*soft_seconds*} $e
+
+ r config set client-output-buffer-limit "normal 1mb 2mb 60 replica 3mb 4mb 70 pubsub 5mb 6mb 80"
+ set res [lindex [r config get client-output-buffer-limit] 1]
+ assert_equal $res "normal 1048576 2097152 60 slave 3145728 4194304 70 pubsub 5242880 6291456 80"
+
+ # Set back to the original value.
+ r config set client-output-buffer-limit $oldval
+ }
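+
+ # For reference, each client-output-buffer-limit setting above is a group of
+ # <class> <hard limit> <soft limit> <soft seconds>; human-readable sizes are
+ # normalized to bytes (1mb -> 1048576) and, as asserted, the "replica" class
+ # is reported under its legacy name "slave".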
+
+ test {Client output buffer hard limit is enforced} {
+ r config set client-output-buffer-limit {pubsub 100000 0 0}
+ set rd1 [redis_deferring_client]
+
+ $rd1 subscribe foo
+ set reply [$rd1 read]
+ assert {$reply eq "subscribe foo 1"}
+
+ set omem 0
+ while 1 {
+ r publish foo bar
+ set clients [split [r client list] "\r\n"]
+ set c [split [lindex $clients 1] " "]
+ if {![regexp {omem=([0-9]+)} $c - omem]} break
+ if {$omem > 200000} break
+ }
+ assert {$omem >= 70000 && $omem < 200000}
+ $rd1 close
+ }
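+
+ # A minimal sketch (hypothetical helper, not used by these tests) of the
+ # omem extraction done inline above: it pulls the output-buffer memory
+ # field out of a CLIENT LIST line such as
+ # id=7 addr=127.0.0.1:12345 ... omem=123456 events=rw cmd=subscribe
+ proc client_omem_example {line} {
+ # Return the omem field, or 0 if the line doesn't carry one.
+ if {[regexp {omem=([0-9]+)} $line - omem]} {
+ return $omem
+ }
+ return 0
+ }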
+
+ foreach {soft_limit_time wait_for_timeout} {3 yes 4 no} {
+ if {$wait_for_timeout} {
+ set test_name "Client output buffer soft limit is enforced if time is overreached"
+ } else {
+ set test_name "Client output buffer soft limit is not enforced too early and is enforced when no traffic"
+ }
+
+ test $test_name {
+ r config set client-output-buffer-limit "pubsub 0 100000 $soft_limit_time"
+ set soft_limit_time [expr $soft_limit_time*1000]
+ set rd1 [redis_deferring_client]
+
+ $rd1 client setname test_client
+ set reply [$rd1 read]
+ assert {$reply eq "OK"}
+
+ $rd1 subscribe foo
+ set reply [$rd1 read]
+ assert {$reply eq "subscribe foo 1"}
+
+ set omem 0
+ set start_time 0
+ set time_elapsed 0
+ set last_under_limit_time [clock milliseconds]
+ while 1 {
+ r publish foo [string repeat "x" 1000]
+ set clients [split [r client list] "\r\n"]
+ set c [lsearch -inline $clients *name=test_client*]
+ if {$start_time != 0} {
+ set time_elapsed [expr {[clock milliseconds]-$start_time}]
+ # Make sure test isn't taking too long
+ assert {$time_elapsed <= [expr $soft_limit_time+3000]}
+ }
+ if {$wait_for_timeout && $c == ""} {
+ # Make sure we're disconnected when we reach the soft limit
+ assert {$omem >= 100000 && $time_elapsed >= $soft_limit_time}
+ break
+ } else {
+ assert {[regexp {omem=([0-9]+)} $c - omem]}
+ }
+ if {$omem > 100000} {
+ if {$start_time == 0} {set start_time $last_under_limit_time}
+ if {!$wait_for_timeout && $time_elapsed >= [expr $soft_limit_time-1000]} break
+ # Slow down loop when omem has reached the limit.
+ after 10
+ } else {
+ # if the OS socket buffers swallowed what we previously filled, reset the start timer.
+ set start_time 0
+ set last_under_limit_time [clock milliseconds]
+ }
+ }
+
+ if {!$wait_for_timeout} {
+ # After we completely stopped the traffic, wait for soft limit to time out
+ set timeout [expr {$soft_limit_time+1500 - ([clock milliseconds]-$start_time)}]
+ wait_for_condition [expr $timeout/10] 10 {
+ [lsearch [split [r client list] "\r\n"] *name=test_client*] == -1
+ } else {
+ fail "Soft limit timed out but client still connected"
+ }
+ }
+
+ $rd1 close
+ }
+ }
+
+ test {No response for single command if client output buffer hard limit is enforced} {
+ r config set latency-tracking no
+ r config set client-output-buffer-limit {normal 100000 0 0}
+ # Total size of all items must be more than 100k
+ set item [string repeat "x" 1000]
+ for {set i 0} {$i < 150} {incr i} {
+ r lpush mylist $item
+ }
+ set orig_mem [s used_memory]
+ # Set client name and get all items
+ set rd [redis_deferring_client]
+ $rd client setname mybiglist
+ assert {[$rd read] eq "OK"}
+ $rd lrange mylist 0 -1
+ $rd flush
+ after 100
+
+ # Redis will close this client before we get a chance to read the reply.
+ set clients [r client list]
+ assert_no_match "*name=mybiglist*" $clients
+ set cur_mem [s used_memory]
+ # 10k is just a tolerance threshold
+ assert {$cur_mem < 10000 + $orig_mem}
+
+ # Read nothing
+ set fd [$rd channel]
+ assert_equal {} [$rd rawread]
+ }
+
+ # Note: This test assumes that what's written with one write will be read
+ # by redis in one read. That assumption is not guaranteed, but it seems to
+ # hold empirically (for now).
+ test {No response for multi commands in pipeline if client output buffer limit is enforced} {
+ r config set client-output-buffer-limit {normal 100000 0 0}
+ set value [string repeat "x" 10000]
+ r set bigkey $value
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ $rd2 client setname multicommands
+ assert_equal "OK" [$rd2 read]
+
+ # First, make redis sleep for 1s
+ $rd1 debug sleep 1
+ $rd1 flush
+ after 100
+
+ # Create a pipeline of commands that will be processed in one socket read.
+ # It is important to use a single write: in TLS mode, independent writes
+ # seem to wait for a response from the server.
+ # The total size should be less than the OS socket buffer, so redis can
+ # execute all commands in this pipeline when it wakes up.
+ set buf ""
+ for {set i 0} {$i < 15} {incr i} {
+ append buf "set $i $i\r\n"
+ append buf "get $i\r\n"
+ append buf "del $i\r\n"
+ # One bigkey is 10k, total response size must be more than 100k
+ append buf "get bigkey\r\n"
+ }
+ $rd2 write $buf
+ $rd2 flush
+ after 100
+
+ # Redis must wake up if it can send the reply
+ assert_equal "PONG" [r ping]
+ set clients [r client list]
+ assert_no_match "*name=multicommands*" $clients
+ assert_equal {} [$rd2 rawread]
+ }
+
+ test {Execute transactions completely even if client output buffer limit is enforced} {
+ r config set client-output-buffer-limit {normal 100000 0 0}
+ # Total size of all items must be more than 100k
+ set item [string repeat "x" 1000]
+ for {set i 0} {$i < 150} {incr i} {
+ r lpush mylist2 $item
+ }
+
+ # Output buffer limit is enforced during executing transaction
+ r client setname transactionclient
+ r set k1 v1
+ r multi
+ r set k2 v2
+ r get k2
+ r lrange mylist2 0 -1
+ r set k3 v3
+ r del k1
+ catch {r exec} e
+ assert_match "*I/O error*" $e
+ reconnect
+ set clients [r client list]
+ assert_no_match "*name=transactionclient*" $clients
+
+ # Transactions should be executed completely
+ assert_equal {} [r get k1]
+ assert_equal "v2" [r get k2]
+ assert_equal "v3" [r get k3]
+ }
+
+ test "Obuf limit, HRANDFIELD with huge count stopped mid-run" {
+ r config set client-output-buffer-limit {normal 1000000 0 0}
+ r hset myhash a b
+ catch {r hrandfield myhash -999999999} e
+ assert_match "*I/O error*" $e
+ reconnect
+ }
+
+ test "Obuf limit, KEYS stopped mid-run" {
+ r config set client-output-buffer-limit {normal 100000 0 0}
+ populate 1000 "long-key-name-prefix-of-100-chars-------------------------------------------------------------------"
+ catch {r keys *} e
+ assert_match "*I/O error*" $e
+ reconnect
+ }
+}
diff --git a/tests/unit/oom-score-adj.tcl b/tests/unit/oom-score-adj.tcl
new file mode 100644
index 0000000..6c7b713
--- /dev/null
+++ b/tests/unit/oom-score-adj.tcl
@@ -0,0 +1,131 @@
+set system_name [string tolower [exec uname -s]]
+set user_id [exec id -u]
+
+if {$system_name eq {linux}} {
+ start_server {tags {"oom-score-adj external:skip"}} {
+ proc get_oom_score_adj {{pid ""}} {
+ if {$pid == ""} {
+ set pid [srv 0 pid]
+ }
+ set fd [open "/proc/$pid/oom_score_adj" "r"]
+ set val [gets $fd]
+ close $fd
+
+ return $val
+ }
+
+ proc set_oom_score_adj {score {pid ""}} {
+ if {$pid == ""} {
+ set pid [srv 0 pid]
+ }
+ set fd [open "/proc/$pid/oom_score_adj" "w"]
+ puts $fd $score
+ close $fd
+ }
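+
+ # The three oom-score-adj-values exercised below apply, in order, to the
+ # main server process, to the server while it acts as a replica, and to
+ # background child processes (e.g. the bgsave fork).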
+
+ test {CONFIG SET oom-score-adj works as expected} {
+ set base [get_oom_score_adj]
+
+ # Enable oom-score-adj, check defaults
+ r config set oom-score-adj-values "10 20 30"
+ r config set oom-score-adj yes
+
+ assert {[get_oom_score_adj] == [expr $base + 10]}
+
+ # Modify current class
+ r config set oom-score-adj-values "15 20 30"
+ assert {[get_oom_score_adj] == [expr $base + 15]}
+
+ # Check replica class
+ r replicaof localhost 1
+ assert {[get_oom_score_adj] == [expr $base + 20]}
+ r replicaof no one
+ assert {[get_oom_score_adj] == [expr $base + 15]}
+
+ # Check child process
+ r set key-a value-a
+ r config set rdb-key-save-delay 1000000
+ r bgsave
+
+ set child_pid [get_child_pid 0]
+ # Wait until the background child process has applied setOOMScoreAdj.
+ wait_for_condition 100 10 {
+ [get_oom_score_adj $child_pid] == [expr $base + 30]
+ } else {
+ fail "Set oom-score-adj of background child process is not ok"
+ }
+ }
+
+ # Failed oom-score-adj tests can only run unprivileged
+ if {$user_id != 0} {
+ test {CONFIG SET oom-score-adj handles configuration failures} {
+ # Bad config
+ r config set oom-score-adj no
+ r config set oom-score-adj-values "-1000 -1000 -1000"
+
+ # Make sure it fails
+ catch {r config set oom-score-adj yes} e
+ assert_match {*Failed to set*} $e
+
+ # Make sure it remains off
+ assert {[r config get oom-score-adj] == "oom-score-adj no"}
+
+ # Fix config
+ r config set oom-score-adj-values "0 100 100"
+ r config set oom-score-adj yes
+
+ # Make sure it fails
+ catch {r config set oom-score-adj-values "-1000 -1000 -1000"} e
+ assert_match {*Failed*} $e
+
+ # Make sure previous values remain
+ assert {[r config get oom-score-adj-values] == {oom-score-adj-values {0 100 100}}}
+ }
+ }
+
+ test {CONFIG SET oom-score-adj-values doesn't touch proc when disabled} {
+ set orig_osa [get_oom_score_adj]
+
+ set other_val1 [expr $orig_osa + 1]
+ set other_val2 [expr $orig_osa + 2]
+
+ r config set oom-score-adj no
+
+ set_oom_score_adj $other_val2
+ assert_equal [get_oom_score_adj] $other_val2
+
+ r config set oom-score-adj-values "$other_val1 $other_val1 $other_val1"
+
+ assert_equal [get_oom_score_adj] $other_val2
+ }
+
+ test {CONFIG SET oom score restored on disable} {
+ r config set oom-score-adj no
+ set_oom_score_adj 22
+ assert_equal [get_oom_score_adj] 22
+
+ r config set oom-score-adj-values "9 9 9" oom-score-adj yes
+ assert_equal [get_oom_score_adj] [expr 9+22]
+
+ r config set oom-score-adj no
+ assert_equal [get_oom_score_adj] 22
+ }
+
+ test {CONFIG SET oom score relative and absolute} {
+ set custom_oom 9
+ r config set oom-score-adj no
+ set base_oom [get_oom_score_adj]
+
+ r config set oom-score-adj-values "$custom_oom $custom_oom $custom_oom" oom-score-adj relative
+ assert_equal [get_oom_score_adj] [expr $base_oom+$custom_oom]
+
+ r config set oom-score-adj absolute
+ assert_equal [get_oom_score_adj] $custom_oom
+ }
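+
+ # As the test above demonstrates: in "relative" mode the configured value
+ # is added to the oom_score_adj the process started with, while "absolute"
+ # mode writes the configured value to /proc/<pid>/oom_score_adj as-is.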
+
+ test {CONFIG SET out-of-range oom score} {
+ assert_error {ERR *must be between -2000 and 2000*} {r config set oom-score-adj-values "-2001 -2001 -2001"}
+ assert_error {ERR *must be between -2000 and 2000*} {r config set oom-score-adj-values "2001 2001 2001"}
+ }
+ }
+}
diff --git a/tests/unit/other.tcl b/tests/unit/other.tcl
new file mode 100644
index 0000000..975e944
--- /dev/null
+++ b/tests/unit/other.tcl
@@ -0,0 +1,428 @@
+start_server {tags {"other"}} {
+ if {$::force_failure} {
+ # This is used just for test suite development purposes.
+ test {Failing test} {
+ format err
+ } {ok}
+ }
+
+ test {Coverage: HELP commands} {
+ assert_match "*OBJECT <subcommand> *" [r OBJECT HELP]
+ assert_match "*MEMORY <subcommand> *" [r MEMORY HELP]
+ assert_match "*PUBSUB <subcommand> *" [r PUBSUB HELP]
+ assert_match "*SLOWLOG <subcommand> *" [r SLOWLOG HELP]
+ assert_match "*CLIENT <subcommand> *" [r CLIENT HELP]
+ assert_match "*COMMAND <subcommand> *" [r COMMAND HELP]
+ assert_match "*CONFIG <subcommand> *" [r CONFIG HELP]
+ assert_match "*FUNCTION <subcommand> *" [r FUNCTION HELP]
+ assert_match "*MODULE <subcommand> *" [r MODULE HELP]
+ }
+
+ test {Coverage: MEMORY MALLOC-STATS} {
+ if {[string match {*jemalloc*} [s mem_allocator]]} {
+ assert_match "*jemalloc*" [r memory malloc-stats]
+ }
+ }
+
+ test {Coverage: MEMORY PURGE} {
+ if {[string match {*jemalloc*} [s mem_allocator]]} {
+ assert_equal {OK} [r memory purge]
+ }
+ }
+
+ test {SAVE - make sure there are all the types as values} {
+ # Wait for a background saving in progress to terminate
+ waitForBgsave r
+ r lpush mysavelist hello
+ r lpush mysavelist world
+ r set myemptykey {}
+ r set mynormalkey {blablablba}
+ r zadd mytestzset 10 a
+ r zadd mytestzset 20 b
+ r zadd mytestzset 30 c
+ r save
+ } {OK} {needs:save}
+
+ tags {slow} {
+ if {$::accurate} {set iterations 10000} else {set iterations 1000}
+ foreach fuzztype {binary alpha compr} {
+ test "FUZZ stresser with data model $fuzztype" {
+ set err 0
+ for {set i 0} {$i < $iterations} {incr i} {
+ set fuzz [randstring 0 512 $fuzztype]
+ r set foo $fuzz
+ set got [r get foo]
+ if {$got ne $fuzz} {
+ set err [list $fuzz $got]
+ break
+ }
+ }
+ set _ $err
+ } {0}
+ }
+ }
+
+ start_server {overrides {save ""} tags {external:skip}} {
+ test {FLUSHALL should not reset the dirty counter if we disable save} {
+ r set key value
+ r flushall
+ assert_morethan [s rdb_changes_since_last_save] 0
+ }
+
+ test {FLUSHALL should reset the dirty counter to 0 if we enable save} {
+ r config set save "3600 1 300 100 60 10000"
+ r set key value
+ r flushall
+ assert_equal [s rdb_changes_since_last_save] 0
+ }
+ }
+
+ test {BGSAVE} {
+ # Use FLUSHALL instead of FLUSHDB: FLUSHALL does a foreground save
+ # and resets the dirty counter to 0, so we won't trigger an unexpected bgsave.
+ r flushall
+ r save
+ r set x 10
+ r bgsave
+ waitForBgsave r
+ r debug reload
+ r get x
+ } {10} {needs:debug needs:save}
+
+ test {SELECT an out of range DB} {
+ catch {r select 1000000} err
+ set _ $err
+ } {*index is out of range*} {cluster:skip}
+
+ tags {consistency} {
+ proc check_consistency {dumpname code} {
+ set dump [csvdump r]
+ set sha1 [debug_digest]
+
+ uplevel 1 $code
+
+ set sha1_after [debug_digest]
+ if {$sha1 eq $sha1_after} {
+ return 1
+ }
+
+ # Failed
+ set newdump [csvdump r]
+ puts "Consistency test failed!"
+ puts "You can inspect the two dumps in /tmp/${dumpname}*.txt"
+
+ set fd [open /tmp/${dumpname}1.txt w]
+ puts $fd $dump
+ close $fd
+ set fd [open /tmp/${dumpname}2.txt w]
+ puts $fd $newdump
+ close $fd
+
+ return 0
+ }
+
+ if {$::accurate} {set numops 10000} else {set numops 1000}
+ test {Check consistency of different data types after a reload} {
+ r flushdb
+ createComplexDataset r $numops usetag
+ if {$::ignoredigest} {
+ set _ 1
+ } else {
+ check_consistency {repldump} {
+ r debug reload
+ }
+ }
+ } {1} {needs:debug}
+
+ test {Same dataset digest if saving/reloading as AOF?} {
+ if {$::ignoredigest} {
+ set _ 1
+ } else {
+ check_consistency {aofdump} {
+ r config set aof-use-rdb-preamble no
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ }
+ }
+ } {1} {needs:debug}
+ }
+
+ test {EXPIRES after a reload (snapshot + append only file rewrite)} {
+ r flushdb
+ r set x 10
+ r expire x 1000
+ r save
+ r debug reload
+ set ttl [r ttl x]
+ set e1 [expr {$ttl > 900 && $ttl <= 1000}]
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ set ttl [r ttl x]
+ set e2 [expr {$ttl > 900 && $ttl <= 1000}]
+ list $e1 $e2
+ } {1 1} {needs:debug needs:save}
+
+ test {EXPIRES after AOF reload (without rewrite)} {
+ r flushdb
+ r config set appendonly yes
+ r config set aof-use-rdb-preamble no
+ r set x somevalue
+ r expire x 1000
+ r setex y 2000 somevalue
+ r set z somevalue
+ r expireat z [expr {[clock seconds]+3000}]
+
+ # Milliseconds variants
+ r set px somevalue
+ r pexpire px 1000000
+ r psetex py 2000000 somevalue
+ r set pz somevalue
+ r pexpireat pz [expr {([clock seconds]+3000)*1000}]
+
+ # Reload and check
+ waitForBgrewriteaof r
+ # We need to wait two seconds to avoid false positives here, otherwise
+ # the DEBUG LOADAOF command may read a partial file.
+ # Another solution would be to set the fsync policy to no, since this
+ # prevents write() from being delayed by the completion of fsync().
+ after 2000
+ r debug loadaof
+ set ttl [r ttl x]
+ assert {$ttl > 900 && $ttl <= 1000}
+ set ttl [r ttl y]
+ assert {$ttl > 1900 && $ttl <= 2000}
+ set ttl [r ttl z]
+ assert {$ttl > 2900 && $ttl <= 3000}
+ set ttl [r ttl px]
+ assert {$ttl > 900 && $ttl <= 1000}
+ set ttl [r ttl py]
+ assert {$ttl > 1900 && $ttl <= 2000}
+ set ttl [r ttl pz]
+ assert {$ttl > 2900 && $ttl <= 3000}
+ r config set appendonly no
+ } {OK} {needs:debug}
+
+ tags {protocol} {
+ test {PIPELINING stresser (also a regression for the old epoll bug)} {
+ if {$::tls} {
+ set fd2 [::tls::socket [srv host] [srv port]]
+ } else {
+ set fd2 [socket [srv host] [srv port]]
+ }
+ fconfigure $fd2 -encoding binary -translation binary
+ if {!$::singledb} {
+ puts -nonewline $fd2 "SELECT 9\r\n"
+ flush $fd2
+ gets $fd2
+ }
+
+ for {set i 0} {$i < 100000} {incr i} {
+ set q {}
+ set val "0000${i}0000"
+ append q "SET key:$i $val\r\n"
+ puts -nonewline $fd2 $q
+ set q {}
+ append q "GET key:$i\r\n"
+ puts -nonewline $fd2 $q
+ }
+ flush $fd2
+
+ for {set i 0} {$i < 100000} {incr i} {
+ gets $fd2 line
+ gets $fd2 count
+ set count [string range $count 1 end]
+ set val [read $fd2 $count]
+ read $fd2 2
+ }
+ close $fd2
+ set _ 1
+ } {1}
+ }
+
+ test {APPEND basics} {
+ r del foo
+ list [r append foo bar] [r get foo] \
+ [r append foo 100] [r get foo]
+ } {3 bar 6 bar100}
+
+ test {APPEND basics, integer encoded values} {
+ set res {}
+ r del foo
+ r append foo 1
+ r append foo 2
+ lappend res [r get foo]
+ r set foo 1
+ r append foo 2
+ lappend res [r get foo]
+ } {12 12}
+
+ test {APPEND fuzzing} {
+ set err {}
+ foreach type {binary alpha compr} {
+ set buf {}
+ r del x
+ for {set i 0} {$i < 1000} {incr i} {
+ set bin [randstring 0 10 $type]
+ append buf $bin
+ r append x $bin
+ }
+ if {$buf != [r get x]} {
+ set err "Expected '$buf' found '[r get x]'"
+ break
+ }
+ }
+ set _ $err
+ } {}
+
+ # Leave the user with a clean DB before exiting
+ test {FLUSHDB} {
+ set aux {}
+ if {$::singledb} {
+ r flushdb
+ lappend aux 0 [r dbsize]
+ } else {
+ r select 9
+ r flushdb
+ lappend aux [r dbsize]
+ r select 10
+ r flushdb
+ lappend aux [r dbsize]
+ }
+ } {0 0}
+
+ test {Perform a final SAVE to leave a clean DB on disk} {
+ waitForBgsave r
+ r save
+ } {OK} {needs:save}
+
+ test {RESET clears client state} {
+ r client setname test-client
+ r client tracking on
+
+ assert_equal [r reset] "RESET"
+ set client [r client list]
+ assert_match {*name= *} $client
+ assert_match {*flags=N *} $client
+ } {} {needs:reset}
+
+ test {RESET clears MONITOR state} {
+ set rd [redis_deferring_client]
+ $rd monitor
+ assert_equal [$rd read] "OK"
+
+ $rd reset
+ assert_equal [$rd read] "RESET"
+ $rd close
+
+ assert_no_match {*flags=O*} [r client list]
+ } {} {needs:reset}
+
+ test {RESET clears and discards MULTI state} {
+ r multi
+ r set key-a a
+
+ r reset
+ catch {r exec} err
+ assert_match {*EXEC without MULTI*} $err
+ } {} {needs:reset}
+
+ test {RESET clears Pub/Sub state} {
+ r subscribe channel-1
+ r reset
+
+ # confirm we're not subscribed by executing another command
+ r set key val
+ } {OK} {needs:reset}
+
+ test {RESET clears authenticated state} {
+ r acl setuser user1 on >secret +@all
+ r auth user1 secret
+ assert_equal [r acl whoami] user1
+
+ r reset
+
+ assert_equal [r acl whoami] default
+ } {} {needs:reset}
+
+ test "Subcommand syntax error crash (issue #10070)" {
+ assert_error {*unknown command*} {r GET|}
+ assert_error {*unknown command*} {r GET|SET}
+ assert_error {*unknown command*} {r GET|SET|OTHER}
+ assert_error {*unknown command*} {r CONFIG|GET GET_XX}
+ assert_error {*unknown subcommand*} {r CONFIG GET_XX}
+ }
+}
+
+start_server {tags {"other external:skip"}} {
+ test {Don't rehash if redis has child process} {
+ r config set save ""
+ r config set rdb-key-save-delay 1000000
+
+ populate 4096 "" 1
+ r bgsave
+ wait_for_condition 10 100 {
+ [s rdb_bgsave_in_progress] eq 1
+ } else {
+ fail "bgsave did not start in time"
+ }
+
+ r mset k1 v1 k2 v2
+ # Hash table should not rehash while the bgsave child is alive, since
+ # rehashing would touch every bucket and inflate copy-on-write memory
+ assert_no_match "*table size: 8192*" [r debug HTSTATS 9]
+ exec kill -9 [get_child_pid 0]
+ waitForBgsave r
+ after 200 ;# waiting for serverCron
+
+ # Hash table should rehash since there is no child process; table sizes
+ # are powers of two, and with more than 4096 keys it grows to 8192
+ r set k3 v3
+ assert_match "*table size: 8192*" [r debug HTSTATS 9]
+ } {} {needs:debug needs:local-process}
+}
+
+proc read_proc_title {pid} {
+ set fd [open "/proc/$pid/cmdline" "r"]
+ set cmdline [read $fd 1024]
+ close $fd
+
+ return $cmdline
+}
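+
+# Note: /proc/<pid>/cmdline is NUL-separated. Redis rewrites its process
+# title over argv, so the template fields checked below arrive as one
+# space-separated string that lindex can split (an assumption this helper
+# relies on).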
+
+start_server {tags {"other external:skip"}} {
+ test {Process title set as expected} {
+ # Test only on Linux where it's easy to get cmdline without relying on tools.
+ # Skip valgrind as it messes up the arguments.
+ set os [exec uname]
+ if {$os == "Linux" && !$::valgrind} {
+ # Set a custom template
+ r config set "proc-title-template" "TEST {title} {listen-addr} {port} {tls-port} {unixsocket} {config-file}"
+ set cmdline [read_proc_title [srv 0 pid]]
+
+ assert_equal "TEST" [lindex $cmdline 0]
+ assert_match "*/redis-server" [lindex $cmdline 1]
+
+ if {$::tls} {
+ set expect_port [srv 0 pport]
+ set expect_tls_port [srv 0 port]
+ set port [srv 0 pport]
+ } else {
+ set expect_port [srv 0 port]
+ set expect_tls_port 0
+ set port [srv 0 port]
+ }
+
+ assert_equal "$::host:$port" [lindex $cmdline 2]
+ assert_equal $expect_port [lindex $cmdline 3]
+ assert_equal $expect_tls_port [lindex $cmdline 4]
+ assert_match "*/tests/tmp/server.*/socket" [lindex $cmdline 5]
+ assert_match "*/tests/tmp/redis.conf.*" [lindex $cmdline 6]
+
+ # Try setting a bad template
+ catch {r config set "proc-title-template" "{invalid-var}"} err
+ assert_match {*template format is invalid*} $err
+ }
+ }
+}
+
diff --git a/tests/unit/pause.tcl b/tests/unit/pause.tcl
new file mode 100644
index 0000000..e30f922
--- /dev/null
+++ b/tests/unit/pause.tcl
@@ -0,0 +1,364 @@
+start_server {tags {"pause network"}} {
+ test "Test read commands are not blocked by client pause" {
+ r client PAUSE 100000 WRITE
+ set rd [redis_deferring_client]
+ $rd GET FOO
+ $rd PING
+ $rd INFO
+ assert_equal [s 0 blocked_clients] 0
+ r client unpause
+ $rd close
+ }
+
+ test "Test old pause-all takes precedence over new pause-write (less restrictive)" {
+ # Scenario:
+ # 1. Run 'PAUSE ALL' for 200msec
+ # 2. Run 'PAUSE WRITE' for 20msec
+ # 3. Wait 50msec
+ # 4. 'GET FOO'.
+ # Expected:
+ # - Although the second 'PAUSE' is shorter than the first, the
+ # pause-client feature sticks to the longer one, i.e., clients stay
+ # paused for up to 200msec.
+ # - The GET command is postponed ~200msec, even though the last command
+ # paused only writes. This is because the first 'PAUSE ALL' is more
+ # restrictive than the second 'PAUSE WRITE', and the pause-client
+ # feature preserves the most restrictive configuration among multiple
+ # settings.
+ set rd [redis_deferring_client]
+ $rd SET FOO BAR
+
+ set test_start_time [clock milliseconds]
+ r client PAUSE 200 ALL
+ r client PAUSE 20 WRITE
+ after 50
+ $rd get FOO
+ # Read both replies; the GET can only be served once the 200msec
+ # pause-all expires, so the elapsed time must exceed it.
+ assert_equal [$rd read] "OK"
+ assert_equal [$rd read] "BAR"
+ set elapsed [expr {[clock milliseconds]-$test_start_time}]
+ assert_lessthan 200 $elapsed
+ $rd close
+ }
+
+ test "Test new pause time is smaller than old one, then old time preserved" {
+ r client PAUSE 60000 WRITE
+ r client PAUSE 10 WRITE
+ after 100
+ set rd [redis_deferring_client]
+ $rd SET FOO BAR
+ wait_for_blocked_clients_count 1 100 10
+
+ r client unpause
+ assert_match "OK" [$rd read]
+ $rd close
+ }
+
+ test "Test write commands are paused by RO" {
+ r client PAUSE 60000 WRITE
+
+ set rd [redis_deferring_client]
+ $rd SET FOO BAR
+ wait_for_blocked_clients_count 1 50 100
+
+ r client unpause
+ assert_match "OK" [$rd read]
+ $rd close
+ }
+
+ test "Test special commands are paused by RO" {
+ r PFADD pause-hll test
+ r client PAUSE 100000 WRITE
+
+ # Test that pfcount, which can replicate, is also blocked
+ set rd [redis_deferring_client]
+ $rd PFCOUNT pause-hll
+ wait_for_blocked_clients_count 1 50 100
+
+ # Test that publish, which adds the message to the replication
+ # stream is blocked.
+ set rd2 [redis_deferring_client]
+ $rd2 publish foo bar
+ wait_for_blocked_clients_count 2 50 100
+
+ r client unpause
+ assert_match "1" [$rd read]
+ assert_match "0" [$rd2 read]
+ $rd close
+ $rd2 close
+ }
+
+ test "Test read/admin multi-execs are not blocked by pause RO" {
+ r SET FOO BAR
+ r client PAUSE 100000 WRITE
+ set rr [redis_client]
+ assert_equal [$rr MULTI] "OK"
+ assert_equal [$rr PING] "QUEUED"
+ assert_equal [$rr GET FOO] "QUEUED"
+ assert_match "PONG BAR" [$rr EXEC]
+ assert_equal [s 0 blocked_clients] 0
+ r client unpause
+ $rr close
+ }
+
+ test "Test write multi-execs are blocked by pause RO" {
+ set rd [redis_deferring_client]
+ $rd MULTI
+ assert_equal [$rd read] "OK"
+ $rd SET FOO BAR
+ assert_equal [$rd read] "QUEUED"
+ r client PAUSE 60000 WRITE
+ $rd EXEC
+ wait_for_blocked_clients_count 1 50 100
+ r client unpause
+ assert_match "OK" [$rd read]
+ $rd close
+ }
+
+ test "Test scripts are blocked by pause RO" {
+ r client PAUSE 60000 WRITE
+ set rd [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ $rd EVAL "return 1" 0
+
+ # test a script with a shebang and no flags for coverage
+ $rd2 EVAL {#!lua
+ return 1
+ } 0
+
+ wait_for_blocked_clients_count 2 50 100
+ r client unpause
+ assert_match "1" [$rd read]
+ assert_match "1" [$rd2 read]
+ $rd close
+ $rd2 close
+ }
+
+ test "Test RO scripts are not blocked by pause RO" {
+ r set x y
+ # create a function for later
+ r FUNCTION load replace {#!lua name=f1
+ redis.register_function{
+ function_name='f1',
+ callback=function() return "hello" end,
+ flags={'no-writes'}
+ }
+ }
+
+ r client PAUSE 6000000 WRITE
+ set rr [redis_client]
+
+ # test an eval that's for sure not in the script cache
+ assert_equal [$rr EVAL {#!lua flags=no-writes
+ return 'unique script'
+ } 0
+ ] "unique script"
+
+ # for sanity, repeat that EVAL on a script that's already cached
+ assert_equal [$rr EVAL {#!lua flags=no-writes
+ return 'unique script'
+ } 0
+ ] "unique script"
+
+ # test EVAL_RO on a unique script that's for sure not in the cache
+ assert_equal [$rr EVAL_RO {
+ return redis.call('GeT', 'x')..' unique script'
+ } 1 x
+ ] "y unique script"
+
+ # test with evalsha
+ set sha [$rr script load {#!lua flags=no-writes
+ return 2
+ }]
+ assert_equal [$rr EVALSHA $sha 0] 2
+
+ # test with function
+ assert_equal [$rr fcall f1 0] hello
+
+ r client unpause
+ $rr close
+ }
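+
+ # The shebang used above ("#!lua flags=no-writes" for EVAL, "#!lua name=..."
+ # plus the no-writes flag for FUNCTION) is what declares a script read-only,
+ # and that declaration is what allows it to run while writes are paused.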
+
+ test "Test read-only scripts in multi-exec are not blocked by pause RO" {
+ r SET FOO BAR
+ r client PAUSE 100000 WRITE
+ set rr [redis_client]
+ assert_equal [$rr MULTI] "OK"
+ assert_equal [$rr EVAL {#!lua flags=no-writes
+ return 12
+ } 0
+ ] QUEUED
+ assert_equal [$rr EVAL {#!lua flags=no-writes
+ return 13
+ } 0
+ ] QUEUED
+ assert_match "12 13" [$rr EXEC]
+ assert_equal [s 0 blocked_clients] 0
+ r client unpause
+ $rr close
+ }
+
+ test "Test write scripts in multi-exec are blocked by pause RO" {
+ set rd [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ # one with a shebang
+ $rd MULTI
+ assert_equal [$rd read] "OK"
+ $rd EVAL {#!lua
+ return 12
+ } 0
+ assert_equal [$rd read] "QUEUED"
+
+ # one without a shebang
+ $rd2 MULTI
+ assert_equal [$rd2 read] "OK"
+ $rd2 EVAL {#!lua
+ return 13
+ } 0
+ assert_equal [$rd2 read] "QUEUED"
+
+ r client PAUSE 60000 WRITE
+ $rd EXEC
+ $rd2 EXEC
+ wait_for_blocked_clients_count 2 50 100
+ r client unpause
+ assert_match "12" [$rd read]
+ assert_match "13" [$rd2 read]
+ $rd close
+ $rd2 close
+ }
+
+ test "Test may-replicate commands are rejected in RO scripts" {
+ # that's specifically important for CLIENT PAUSE WRITE
+ assert_error {ERR Write commands are not allowed from read-only scripts. script:*} {
+ r EVAL_RO "return redis.call('publish','ch','msg')" 0
+ }
+ assert_error {ERR Write commands are not allowed from read-only scripts. script:*} {
+ r EVAL {#!lua flags=no-writes
+ return redis.call('publish','ch','msg')
+ } 0
+ }
+ # make sure that publish isn't blocked from a non-RO script
+ assert_equal [r EVAL "return redis.call('publish','ch','msg')" 0] 0
+ }
+
+ test "Test multiple clients can be queued up and unblocked" {
+ r client PAUSE 60000 WRITE
+ set clients [list [redis_deferring_client] [redis_deferring_client] [redis_deferring_client]]
+ foreach client $clients {
+ $client SET FOO BAR
+ }
+
+ wait_for_blocked_clients_count 3 50 100
+ r client unpause
+ foreach client $clients {
+ assert_match "OK" [$client read]
+ $client close
+ }
+ }
+
+ test "Test clients with syntax errors will get responses immediately" {
+ r client PAUSE 100000 WRITE
+ catch {r set FOO} err
+ assert_match "ERR wrong number of arguments for 'set' command" $err
+ r client unpause
+ }
+
+ test "Test both active and passive expires are skipped during client pause" {
+ set expired_keys [s 0 expired_keys]
+ r multi
+ r set foo{t} bar{t} PX 10
+ r set bar{t} foo{t} PX 10
+ r client PAUSE 50000 WRITE
+ r exec
+
+ wait_for_condition 10 100 {
+ [r get foo{t}] == {} && [r get bar{t}] == {}
+ } else {
+ fail "Keys were never logically expired"
+ }
+
+ # No keys should actually have been expired
+ assert_match $expired_keys [s 0 expired_keys]
+
+ r client unpause
+
+ # Force the keys to expire
+ r get foo{t}
+ r get bar{t}
+
+ # Now that clients have been unpaused, expires should go through
+ assert_match [expr $expired_keys + 2] [s 0 expired_keys]
+ }
+
+ test "Test that client pause starts at the end of a transaction" {
+ r MULTI
+ r SET FOO1{t} BAR
+ r client PAUSE 60000 WRITE
+ r SET FOO2{t} BAR
+ r exec
+
+ set rd [redis_deferring_client]
+ $rd SET FOO3{t} BAR
+
+ wait_for_blocked_clients_count 1 50 100
+
+ assert_match "BAR" [r GET FOO1{t}]
+ assert_match "BAR" [r GET FOO2{t}]
+ assert_match "" [r GET FOO3{t}]
+
+ r client unpause
+ assert_match "OK" [$rd read]
+ $rd close
+ }
+
+ start_server {tags {needs:repl external:skip}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+
+ # Avoid PINGs
+ $master config set repl-ping-replica-period 3600
+ r replicaof $master_host $master_port
+
+ wait_for_condition 50 100 {
+ [s master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ test "Test when replica paused, offset would not grow" {
+ $master set foo bar
+ set old_master_offset [status $master master_repl_offset]
+
+ wait_for_condition 50 100 {
+ [s slave_repl_offset] == [status $master master_repl_offset]
+ } else {
+ fail "Replication offset not matched."
+ }
+
+ r client pause 100000 write
+ $master set foo2 bar2
+
+ # Make sure replica received data from master
+ wait_for_condition 50 100 {
+ [s slave_read_repl_offset] == [status $master master_repl_offset]
+ } else {
+ fail "Replication not work."
+ }
+
+ # The replica must not have applied the write command while paused
+ assert {[s slave_repl_offset] == $old_master_offset}
+ r get foo2
+ } {}
+
+ test "Test replica offset would grow after unpause" {
+ r client unpause
+ wait_for_condition 50 100 {
+ [s slave_repl_offset] == [status $master master_repl_offset]
+ } else {
+ fail "Replication not continue."
+ }
+ r get foo2
+ } {bar2}
+ }
+
+ # Make sure we unpause at the end
+ r client unpause
+}
diff --git a/tests/unit/printver.tcl b/tests/unit/printver.tcl
new file mode 100644
index 0000000..c80f451
--- /dev/null
+++ b/tests/unit/printver.tcl
@@ -0,0 +1,6 @@
+start_server {} {
+ set i [r info]
+ regexp {redis_version:(.*?)\r\n} $i - version
+ regexp {redis_git_sha1:(.*?)\r\n} $i - sha1
+ puts "Testing Redis version $version ($sha1)"
+}
diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl
new file mode 100644
index 0000000..e3b4115
--- /dev/null
+++ b/tests/unit/protocol.tcl
@@ -0,0 +1,250 @@
+start_server {tags {"protocol network"}} {
+ test "Handle an empty query" {
+ reconnect
+ r write "\r\n"
+ r flush
+ assert_equal "PONG" [r ping]
+ }
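+
+ # The malformed payloads below are variations on RESP request framing, where
+ # a well-formed command is an array of bulk strings, e.g. SET x foo is:
+ # *3\r\n$3\r\nSET\r\n$1\r\nx\r\n$3\r\nfoo\r\n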
+
+ test "Negative multibulk length" {
+ reconnect
+ r write "*-10\r\n"
+ r flush
+ assert_equal PONG [r ping]
+ }
+
+ test "Out of range multibulk length" {
+ reconnect
+ r write "*3000000000\r\n"
+ r flush
+ assert_error "*invalid multibulk length*" {r read}
+ }
+
+ test "Wrong multibulk payload header" {
+ reconnect
+ r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\nfooz\r\n"
+ r flush
+ assert_error "*expected '$', got 'f'*" {r read}
+ }
+
+ test "Negative multibulk payload length" {
+ reconnect
+ r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$-10\r\n"
+ r flush
+ assert_error "*invalid bulk length*" {r read}
+ }
+
+ test "Out of range multibulk payload length" {
+ reconnect
+ r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$2000000000\r\n"
+ r flush
+ assert_error "*invalid bulk length*" {r read}
+ }
+
+ test "Non-number multibulk payload length" {
+ reconnect
+ r write "*3\r\n\$3\r\nSET\r\n\$1\r\nx\r\n\$blabla\r\n"
+ r flush
+ assert_error "*invalid bulk length*" {r read}
+ }
+
+ test "Multi bulk request not followed by bulk arguments" {
+ reconnect
+ r write "*1\r\nfoo\r\n"
+ r flush
+ assert_error "*expected '$', got 'f'*" {r read}
+ }
+
+ test "Generic wrong number of args" {
+ reconnect
+ assert_error "*wrong*arguments*ping*" {r ping x y z}
+ }
+
+ test "Unbalanced number of quotes" {
+ reconnect
+ r write "set \"\"\"test-key\"\"\" test-value\r\n"
+ r write "ping\r\n"
+ r flush
+ assert_error "*unbalanced*" {r read}
+ }
+
+ set c 0
+ foreach seq [list "\x00" "*\x00" "$\x00"] {
+ incr c
+ test "Protocol desync regression test #$c" {
+ if {$::tls} {
+ set s [::tls::socket [srv 0 host] [srv 0 port]]
+ } else {
+ set s [socket [srv 0 host] [srv 0 port]]
+ }
+ puts -nonewline $s $seq
+ set payload [string repeat A 1024]"\n"
+ set test_start [clock seconds]
+ set test_time_limit 30
+ while 1 {
+ if {[catch {
+ puts -nonewline $s $payload
+ flush $s
+ incr payload_size [string length $payload]
+ }]} {
+ set retval [gets $s]
+ close $s
+ break
+ } else {
+ set elapsed [expr {[clock seconds]-$test_start}]
+ if {$elapsed > $test_time_limit} {
+ close $s
+ error "assertion:Redis did not closed connection after protocol desync"
+ }
+ }
+ }
+ set retval
+ } {*Protocol error*}
+ }
+ unset c
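+
+ # Each seq above seeds the parser with a NUL byte in a different position
+ # (as an inline command, after the "*" multibulk header, and after a "$");
+ # flooding the connection afterwards must eventually make redis reply with
+ # a protocol error and close the connection.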
+
+ # recover the broken connection
+ reconnect
+ r ping
+
+ # raw RESP response tests
+ r readraw 1
+
+ set nullres {*-1}
+ if {$::force_resp3} {
+ set nullres {_}
+ }
+
+ test "raw protocol response" {
+ r srandmember nonexisting_key
+ } "$nullres"
+
+ r deferred 1
+
+ test "raw protocol response - deferred" {
+ r srandmember nonexisting_key
+ r read
+ } "$nullres"
+
+ test "raw protocol response - multiline" {
+ r sadd ss a
+ assert_equal [r read] {:1}
+ r srandmember ss 100
+ assert_equal [r read] {*1}
+ assert_equal [r read] {$1}
+ assert_equal [r read] {a}
+ }
+
+ # restore connection settings
+ r readraw 0
+ r deferred 0
+
+ # check the connection still works
+ assert_equal [r ping] {PONG}
+
+ test {RESP3 attributes} {
+ r hello 3
+ assert_equal {Some real reply following the attribute} [r debug protocol attrib]
+ assert_equal {key-popularity {key:123 90}} [r attributes]
+
+ # make sure attributes are not kept from previous command
+ r ping
+ assert_error {*attributes* no such element in array} {r attributes}
+
+ # restore state
+ r hello 2
+ set _ ""
+ } {} {needs:debug resp3}
+
+ test {RESP3 attributes readraw} {
+ r hello 3
+ r readraw 1
+ r deferred 1
+
+ r debug protocol attrib
+ assert_equal [r read] {|1}
+ assert_equal [r read] {$14}
+ assert_equal [r read] {key-popularity}
+ assert_equal [r read] {*2}
+ assert_equal [r read] {$7}
+ assert_equal [r read] {key:123}
+ assert_equal [r read] {:90}
+ assert_equal [r read] {$39}
+ assert_equal [r read] {Some real reply following the attribute}
+
+ # restore state
+ r readraw 0
+ r deferred 0
+ r hello 2
+ set _ {}
+ } {} {needs:debug resp3}
+
+ test {RESP3 attributes on RESP2} {
+ r hello 2
+ set res [r debug protocol attrib]
+ set _ $res
+ } {Some real reply following the attribute} {needs:debug}
+
+ test "test big number parsing" {
+ r hello 3
+ r debug protocol bignum
+ } {1234567999999999999999999999999999999} {needs:debug resp3}
+
+ test "test bool parsing" {
+ r hello 3
+ assert_equal [r debug protocol true] 1
+ assert_equal [r debug protocol false] 0
+ r hello 2
+ assert_equal [r debug protocol true] 1
+ assert_equal [r debug protocol false] 0
+ set _ {}
+ } {} {needs:debug resp3}
+
+ test "test verbatim str parsing" {
+ r hello 3
+ r debug protocol verbatim
+ } "This is a verbatim\nstring" {needs:debug resp3}
+
+ test "test large number of args" {
+ r flushdb
+ set args [split [string trim [string repeat "k v " 10000]]]
+ lappend args "{k}2" v2
+ r mset {*}$args
+ assert_equal [r get "{k}2"] v2
+ }
+
+ test "test argument rewriting - issue 9598" {
+ # INCRBYFLOAT uses argument rewriting for correct float value propagation.
+ # We use it to make sure argument rewriting works properly. It's important
+ # this test is run under valgrind to verify there are no memory leaks in
+ # arg buffer handling.
+ r flushdb
+
+ # Test normal argument handling
+ r set k 0
+ assert_equal [r incrbyfloat k 1.0] 1
+
+ # Test argument handling in multi-state buffers
+ r multi
+ r incrbyfloat k 1.0
+ assert_equal [r exec] 2
+ }
+
+}
+
+start_server {tags {"regression"}} {
+ test "Regression for a crash with blocking ops and pipelining" {
+ set rd [redis_deferring_client]
+ set fd [r channel]
+ set proto "*3\r\n\$5\r\nBLPOP\r\n\$6\r\nnolist\r\n\$1\r\n0\r\n"
+ puts -nonewline $fd $proto$proto
+ flush $fd
+ set res {}
+
+ $rd rpush nolist a
+ $rd read
+ $rd rpush nolist a
+ $rd read
+ $rd close
+ }
+}
diff --git a/tests/unit/pubsub.tcl b/tests/unit/pubsub.tcl
new file mode 100644
index 0000000..3797b00
--- /dev/null
+++ b/tests/unit/pubsub.tcl
@@ -0,0 +1,506 @@
+start_server {tags {"pubsub network"}} {
+ if {$::singledb} {
+ set db 0
+ } else {
+ set db 9
+ }
+
+ foreach resp {2 3} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$resp == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$resp == 2} {continue}
+ }
+ # Create the client only after the skip checks, so a skipped iteration
+ # does not leak a connection.
+ set rd1 [redis_deferring_client]
+
+ $rd1 hello $resp
+ $rd1 read
+
+ test "Pub/Sub PING on RESP$resp" {
+ subscribe $rd1 somechannel
+ # While subscribed to non-zero channels PING works in Pub/Sub mode.
+ $rd1 ping
+ $rd1 ping "foo"
+ # In RESP3, the SUBSCRIBEd client can issue any command and get a reply, so the PINGs are standard
+ # In RESP2, only a handful of commands are allowed after a client is SUBSCRIBED (PING is one of them).
+ # For some reason, the reply in that case is an array with two elements: "pong" and argv[1] or an empty string
+ # God knows why. Done in commit 2264b981
+ if {$resp == 3} {
+ assert_equal {PONG} [$rd1 read]
+ assert_equal {foo} [$rd1 read]
+ } else {
+ assert_equal {pong {}} [$rd1 read]
+ assert_equal {pong foo} [$rd1 read]
+ }
+ unsubscribe $rd1 somechannel
+ # Now we are unsubscribed, PING should just return PONG.
+ $rd1 ping
+ assert_equal {PONG} [$rd1 read]
+
+ }
+ $rd1 close
+ }
+
+ test "PUBLISH/SUBSCRIBE basics" {
+ set rd1 [redis_deferring_client]
+
+ # subscribe to two channels
+ assert_equal {1 2} [subscribe $rd1 {chan1 chan2}]
+ assert_equal 1 [r publish chan1 hello]
+ assert_equal 1 [r publish chan2 world]
+ assert_equal {message chan1 hello} [$rd1 read]
+ assert_equal {message chan2 world} [$rd1 read]
+
+ # unsubscribe from one of the channels
+ unsubscribe $rd1 {chan1}
+ assert_equal 0 [r publish chan1 hello]
+ assert_equal 1 [r publish chan2 world]
+ assert_equal {message chan2 world} [$rd1 read]
+
+ # unsubscribe from the remaining channel
+ unsubscribe $rd1 {chan2}
+ assert_equal 0 [r publish chan1 hello]
+ assert_equal 0 [r publish chan2 world]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "PUBLISH/SUBSCRIBE with two clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ assert_equal {1} [subscribe $rd1 {chan1}]
+ assert_equal {1} [subscribe $rd2 {chan1}]
+ assert_equal 2 [r publish chan1 hello]
+ assert_equal {message chan1 hello} [$rd1 read]
+ assert_equal {message chan1 hello} [$rd2 read]
+
+ # clean up clients
+ $rd1 close
+ $rd2 close
+ }
+
+ test "PUBLISH/SUBSCRIBE after UNSUBSCRIBE without arguments" {
+ set rd1 [redis_deferring_client]
+ assert_equal {1 2 3} [subscribe $rd1 {chan1 chan2 chan3}]
+ unsubscribe $rd1
+ assert_equal 0 [r publish chan1 hello]
+ assert_equal 0 [r publish chan2 hello]
+ assert_equal 0 [r publish chan3 hello]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "SUBSCRIBE to one channel more than once" {
+ set rd1 [redis_deferring_client]
+ assert_equal {1 1 1} [subscribe $rd1 {chan1 chan1 chan1}]
+ assert_equal 1 [r publish chan1 hello]
+ assert_equal {message chan1 hello} [$rd1 read]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "UNSUBSCRIBE from non-subscribed channels" {
+ set rd1 [redis_deferring_client]
+ assert_equal {0 0 0} [unsubscribe $rd1 {foo bar quux}]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "PUBLISH/PSUBSCRIBE basics" {
+ set rd1 [redis_deferring_client]
+
+ # subscribe to two patterns
+ assert_equal {1 2} [psubscribe $rd1 {foo.* bar.*}]
+ assert_equal 1 [r publish foo.1 hello]
+ assert_equal 1 [r publish bar.1 hello]
+ assert_equal 0 [r publish foo1 hello]
+ assert_equal 0 [r publish barfoo.1 hello]
+ assert_equal 0 [r publish qux.1 hello]
+ assert_equal {pmessage foo.* foo.1 hello} [$rd1 read]
+ assert_equal {pmessage bar.* bar.1 hello} [$rd1 read]
+
+ # unsubscribe from one of the patterns
+ assert_equal {1} [punsubscribe $rd1 {foo.*}]
+ assert_equal 0 [r publish foo.1 hello]
+ assert_equal 1 [r publish bar.1 hello]
+ assert_equal {pmessage bar.* bar.1 hello} [$rd1 read]
+
+ # unsubscribe from the remaining pattern
+ assert_equal {0} [punsubscribe $rd1 {bar.*}]
+ assert_equal 0 [r publish foo.1 hello]
+ assert_equal 0 [r publish bar.1 hello]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "PUBLISH/PSUBSCRIBE with two clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ assert_equal {1} [psubscribe $rd1 {chan.*}]
+ assert_equal {1} [psubscribe $rd2 {chan.*}]
+ assert_equal 2 [r publish chan.foo hello]
+ assert_equal {pmessage chan.* chan.foo hello} [$rd1 read]
+ assert_equal {pmessage chan.* chan.foo hello} [$rd2 read]
+
+ # clean up clients
+ $rd1 close
+ $rd2 close
+ }
+
+ test "PUBLISH/PSUBSCRIBE after PUNSUBSCRIBE without arguments" {
+ set rd1 [redis_deferring_client]
+ assert_equal {1 2 3} [psubscribe $rd1 {chan1.* chan2.* chan3.*}]
+ punsubscribe $rd1
+ assert_equal 0 [r publish chan1.hi hello]
+ assert_equal 0 [r publish chan2.hi hello]
+ assert_equal 0 [r publish chan3.hi hello]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "PubSub messages with CLIENT REPLY OFF" {
+ set rd [redis_deferring_client]
+ $rd hello 3
+ $rd read ;# Discard the hello reply
+
+ # Test that the subscribe/psubscribe notification is ok
+ $rd client reply off
+ assert_equal {1} [subscribe $rd channel]
+ assert_equal {2} [psubscribe $rd ch*]
+
+ # Test that the publish notification is ok
+ $rd client reply off
+ assert_equal 2 [r publish channel hello]
+ assert_equal {message channel hello} [$rd read]
+ assert_equal {pmessage ch* channel hello} [$rd read]
+
+ # Test that the unsubscribe/punsubscribe notification is ok
+ $rd client reply off
+ assert_equal {1} [unsubscribe $rd channel]
+ assert_equal {0} [punsubscribe $rd ch*]
+
+ $rd close
+ } {0} {resp3}
+
+ test "PUNSUBSCRIBE from non-subscribed channels" {
+ set rd1 [redis_deferring_client]
+ assert_equal {0 0 0} [punsubscribe $rd1 {foo.* bar.* quux.*}]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "NUMSUB returns numbers, not strings (#1561)" {
+ r pubsub numsub abc def
+ } {abc 0 def 0}
+
+ test "NUMPATs returns the number of unique patterns" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ # Three unique patterns and one that overlaps
+ psubscribe $rd1 "foo*"
+ psubscribe $rd2 "foo*"
+ psubscribe $rd1 "bar*"
+ psubscribe $rd2 "baz*"
+
+ set patterns [r pubsub numpat]
+
+ # clean up clients
+ punsubscribe $rd1
+ punsubscribe $rd2
+ assert_equal 3 $patterns
+ $rd1 close
+ $rd2 close
+ }
+
+ test "Mix SUBSCRIBE and PSUBSCRIBE" {
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [subscribe $rd1 {foo.bar}]
+ assert_equal {2} [psubscribe $rd1 {foo.*}]
+
+ assert_equal 2 [r publish foo.bar hello]
+ assert_equal {message foo.bar hello} [$rd1 read]
+ assert_equal {pmessage foo.* foo.bar hello} [$rd1 read]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "PUNSUBSCRIBE and UNSUBSCRIBE should always reply" {
+ # Make sure we are not subscribed to any channel at all.
+ r punsubscribe
+ r unsubscribe
+ # Now check if the commands still reply correctly.
+ set reply1 [r punsubscribe]
+ set reply2 [r unsubscribe]
+ concat $reply1 $reply2
+ } {punsubscribe {} 0 unsubscribe {} 0}
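+
+ # The replies above are the usual 3-element subscribe-family notification:
+ # the action, the channel (nil here, since there was no subscription), and
+ # the number of remaining subscriptions.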
+
+ ### Keyspace events notification tests
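+
+ # Reminder of the notify-keyspace-events flags used below: K enables
+ # __keyspace@<db>__ notifications and E enables __keyevent@<db>__ ones,
+ # while the class flags select event types (g generic, $ string, l list,
+ # s set, h hash, z zset, x expired, e evicted, t stream, n new-key; A is
+ # an alias for all classes except key-miss and new-key events).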
+
+ test "Keyspace notifications: we receive keyspace notifications" {
+ r config set notify-keyspace-events KA
+ set rd1 [redis_deferring_client]
+ $rd1 CLIENT REPLY OFF ;# Make sure it works even if replies are silenced
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ assert_equal "pmessage * __keyspace@${db}__:foo set" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: we receive keyevent notifications" {
+ r config set notify-keyspace-events EA
+ set rd1 [redis_deferring_client]
+ $rd1 CLIENT REPLY SKIP ;# Make sure it works even if replies are silenced
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ assert_equal "pmessage * __keyevent@${db}__:set foo" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: we can receive both kind of events" {
+ r config set notify-keyspace-events KEA
+ set rd1 [redis_deferring_client]
+ $rd1 CLIENT REPLY ON ;# Just coverage
+ assert_equal {OK} [$rd1 read]
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ assert_equal "pmessage * __keyspace@${db}__:foo set" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:set foo" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: we are able to mask events" {
+ r config set notify-keyspace-events KEl
+ r del mylist
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ r lpush mylist a
+ # No notification for set, because only list commands are enabled.
+ assert_equal "pmessage * __keyspace@${db}__:mylist lpush" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:lpush mylist" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: general events test" {
+ r config set notify-keyspace-events KEg
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ r expire foo 1
+ r del foo
+ assert_equal "pmessage * __keyspace@${db}__:foo expire" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:expire foo" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:foo del" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:del foo" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: list events test" {
+ r config set notify-keyspace-events KEl
+ r del mylist
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r lpush mylist a
+ r rpush mylist a
+ r rpop mylist
+ assert_equal "pmessage * __keyspace@${db}__:mylist lpush" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:lpush mylist" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mylist rpush" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:rpush mylist" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mylist rpop" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:rpop mylist" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: set events test" {
+ r config set notify-keyspace-events Ks
+ r del myset
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r sadd myset a b c d
+ r srem myset x
+ r sadd myset x y z
+ r srem myset x
+ assert_equal "pmessage * __keyspace@${db}__:myset sadd" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:myset sadd" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:myset srem" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: zset events test" {
+ r config set notify-keyspace-events Kz
+ r del myzset
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r zadd myzset 1 a 2 b
+ r zrem myzset x
+ r zadd myzset 3 x 4 y 5 z
+ r zrem myzset x
+ assert_equal "pmessage * __keyspace@${db}__:myzset zadd" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:myzset zadd" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:myzset zrem" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: hash events test" {
+ r config set notify-keyspace-events Kh
+ r del myhash
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r hmset myhash yes 1 no 0
+ r hincrby myhash yes 10
+ assert_equal "pmessage * __keyspace@${db}__:myhash hset" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:myhash hincrby" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: stream events test" {
+ r config set notify-keyspace-events Kt
+ r del mystream
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r xgroup create mystream mygroup $ mkstream
+ r xgroup createconsumer mystream mygroup Bob
+ set id [r xadd mystream 1 field1 A]
+ r xreadgroup group mygroup Alice STREAMS mystream >
+ r xclaim mystream mygroup Mike 0 $id force
+ # Not notify because of "Lee" not exists.
+ r xgroup delconsumer mystream mygroup Lee
+ # Not notify because of "Bob" exists.
+ r xautoclaim mystream mygroup Bob 0 $id
+ r xgroup delconsumer mystream mygroup Bob
+ assert_equal "pmessage * __keyspace@${db}__:mystream xgroup-create" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mystream xgroup-createconsumer" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mystream xadd" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mystream xgroup-createconsumer" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mystream xgroup-createconsumer" [$rd1 read]
+ assert_equal "pmessage * __keyspace@${db}__:mystream xgroup-delconsumer" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: expired events (triggered expire)" {
+ r config set notify-keyspace-events Ex
+ r del foo
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r psetex foo 100 1
+ wait_for_condition 50 100 {
+ [r exists foo] == 0
+ } else {
+ fail "Key does not expire?!"
+ }
+ assert_equal "pmessage * __keyevent@${db}__:expired foo" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: expired events (background expire)" {
+ r config set notify-keyspace-events Ex
+ r del foo
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r psetex foo 100 1
+ assert_equal "pmessage * __keyevent@${db}__:expired foo" [$rd1 read]
+ $rd1 close
+ }
+
+ test "Keyspace notifications: evicted events" {
+ r config set notify-keyspace-events Ee
+ r config set maxmemory-policy allkeys-lru
+ r flushdb
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ r config set maxmemory 1
+ assert_equal "pmessage * __keyevent@${db}__:evicted foo" [$rd1 read]
+ r config set maxmemory 0
+ $rd1 close
+ r config set maxmemory-policy noeviction
+ } {OK} {needs:config-maxmemory}
+
+ test "Keyspace notifications: test CONFIG GET/SET of event flags" {
+ r config set notify-keyspace-events gKE
+ assert_equal {gKE} [lindex [r config get notify-keyspace-events] 1]
+ r config set notify-keyspace-events {$lshzxeKE}
+ assert_equal {$lshzxeKE} [lindex [r config get notify-keyspace-events] 1]
+ r config set notify-keyspace-events KA
+ assert_equal {AK} [lindex [r config get notify-keyspace-events] 1]
+ r config set notify-keyspace-events EA
+ assert_equal {AE} [lindex [r config get notify-keyspace-events] 1]
+ }
+
+ test "Keyspace notifications: new key test" {
+ r config set notify-keyspace-events En
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [psubscribe $rd1 *]
+ r set foo bar
+ # second set of foo should not cause a 'new' event
+ r set foo baz
+ r set bar bar
+ assert_equal "pmessage * __keyevent@${db}__:new foo" [$rd1 read]
+ assert_equal "pmessage * __keyevent@${db}__:new bar" [$rd1 read]
+ $rd1 close
+ }
+
+ test "publish to self inside multi" {
+ r hello 3
+ r subscribe foo
+ r multi
+ r ping abc
+ r publish foo bar
+ r publish foo vaz
+ r ping def
+ assert_equal [r exec] {abc 1 1 def}
+ assert_equal [r read] {message foo bar}
+ assert_equal [r read] {message foo vaz}
+ } {} {resp3}
+
+ test "publish to self inside script" {
+ r hello 3
+ r subscribe foo
+ set res [r eval {
+ redis.call("ping","abc")
+ redis.call("publish","foo","bar")
+ redis.call("publish","foo","vaz")
+ redis.call("ping","def")
+ return "bla"} 0]
+ assert_equal $res {bla}
+ assert_equal [r read] {message foo bar}
+ assert_equal [r read] {message foo vaz}
+ } {} {resp3}
+
+ test "unsubscribe inside multi, and publish to self" {
+ r hello 3
+
+ # Note: SUBSCRIBE and UNSUBSCRIBE with multiple channels in the same command,
+ # breaks the multi response, see https://github.com/redis/redis/issues/12207
+ # this is just a temporary sanity test to detect unintended breakage.
+
+ # subscribe for 3 channels actually emits 3 "responses"
+ assert_equal "subscribe foo 1" [r subscribe foo bar baz]
+ assert_equal "subscribe bar 2" [r read]
+ assert_equal "subscribe baz 3" [r read]
+
+ r multi
+ r ping abc
+ r unsubscribe bar
+ r unsubscribe baz
+ r ping def
+ assert_equal [r exec] {abc {unsubscribe bar 2} {unsubscribe baz 1} def}
+
+ # published message comes after the publish command's response.
+ assert_equal [r publish foo vaz] {1}
+ assert_equal [r read] {message foo vaz}
+ } {} {resp3}
+
+}
diff --git a/tests/unit/pubsubshard.tcl b/tests/unit/pubsubshard.tcl
new file mode 100644
index 0000000..6e3fb61
--- /dev/null
+++ b/tests/unit/pubsubshard.tcl
@@ -0,0 +1,164 @@
+start_server {tags {"pubsubshard external:skip"}} {
+ test "SPUBLISH/SSUBSCRIBE basics" {
+ set rd1 [redis_deferring_client]
+
+ # subscribe to two channels
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ assert_equal {2} [ssubscribe $rd1 {chan2}]
+ assert_equal 1 [r SPUBLISH chan1 hello]
+ assert_equal 1 [r SPUBLISH chan2 world]
+ assert_equal {smessage chan1 hello} [$rd1 read]
+ assert_equal {smessage chan2 world} [$rd1 read]
+
+ # unsubscribe from one of the channels
+ sunsubscribe $rd1 {chan1}
+ assert_equal 0 [r SPUBLISH chan1 hello]
+ assert_equal 1 [r SPUBLISH chan2 world]
+ assert_equal {smessage chan2 world} [$rd1 read]
+
+ # unsubscribe from the remaining channel
+ sunsubscribe $rd1 {chan2}
+ assert_equal 0 [r SPUBLISH chan1 hello]
+ assert_equal 0 [r SPUBLISH chan2 world]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "SPUBLISH/SSUBSCRIBE with two clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ assert_equal {1} [ssubscribe $rd2 {chan1}]
+ assert_equal 2 [r SPUBLISH chan1 hello]
+ assert_equal {smessage chan1 hello} [$rd1 read]
+ assert_equal {smessage chan1 hello} [$rd2 read]
+
+ # clean up clients
+ $rd1 close
+ $rd2 close
+ }
+
+ test "SPUBLISH/SSUBSCRIBE after UNSUBSCRIBE without arguments" {
+ set rd1 [redis_deferring_client]
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ assert_equal {2} [ssubscribe $rd1 {chan2}]
+ assert_equal {3} [ssubscribe $rd1 {chan3}]
+ sunsubscribe $rd1
+ assert_equal 0 [r SPUBLISH chan1 hello]
+ assert_equal 0 [r SPUBLISH chan2 hello]
+ assert_equal 0 [r SPUBLISH chan3 hello]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "SSUBSCRIBE to one channel more than once" {
+ set rd1 [redis_deferring_client]
+ assert_equal {1 1 1} [ssubscribe $rd1 {chan1 chan1 chan1}]
+ assert_equal 1 [r SPUBLISH chan1 hello]
+ assert_equal {smessage chan1 hello} [$rd1 read]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "SUNSUBSCRIBE from non-subscribed channels" {
+ set rd1 [redis_deferring_client]
+ assert_equal {0} [sunsubscribe $rd1 {foo}]
+ assert_equal {0} [sunsubscribe $rd1 {bar}]
+ assert_equal {0} [sunsubscribe $rd1 {quux}]
+
+ # clean up clients
+ $rd1 close
+ }
+
+ test "PUBSUB command basics" {
+ r pubsub shardnumsub abc def
+ } {abc 0 def 0}
+
+ test "SPUBLISH/SSUBSCRIBE with two clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ assert_equal {1} [ssubscribe $rd2 {chan1}]
+ assert_equal 2 [r SPUBLISH chan1 hello]
+ assert_equal "chan1 2" [r pubsub shardnumsub chan1]
+ assert_equal "chan1" [r pubsub shardchannels]
+
+ # clean up clients
+ $rd1 close
+ $rd2 close
+ }
+
+ test "SPUBLISH/SSUBSCRIBE with PUBLISH/SUBSCRIBE" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ assert_equal {1} [subscribe $rd2 {chan1}]
+ assert_equal 1 [r SPUBLISH chan1 hello]
+ assert_equal 1 [r publish chan1 hello]
+ assert_equal "chan1 1" [r pubsub shardnumsub chan1]
+ assert_equal "chan1 1" [r pubsub numsub chan1]
+ assert_equal "chan1" [r pubsub shardchannels]
+ assert_equal "chan1" [r pubsub channels]
+
+ $rd1 close
+ $rd2 close
+ }
+
+ test "PubSubShard with CLIENT REPLY OFF" {
+ set rd [redis_deferring_client]
+ $rd hello 3
+ $rd read ;# Discard the hello reply
+
+ # Test that the ssubscribe notification is ok
+ $rd client reply off
+ $rd ping
+ assert_equal {1} [ssubscribe $rd channel]
+
+ # Test that the spublish notification is ok
+ $rd client reply off
+ $rd ping
+ assert_equal 1 [r spublish channel hello]
+ assert_equal {smessage channel hello} [$rd read]
+
+ # Test that sunsubscribe notification is ok
+ $rd client reply off
+ $rd ping
+ assert_equal {0} [sunsubscribe $rd channel]
+
+ $rd close
+ }
+}
+
+start_server {tags {"pubsubshard external:skip"}} {
+start_server {tags {"pubsubshard external:skip"}} {
+ set node_0 [srv 0 client]
+ set node_0_host [srv 0 host]
+ set node_0_port [srv 0 port]
+
+ set node_1 [srv -1 client]
+ set node_1_host [srv -1 host]
+ set node_1_port [srv -1 port]
+
+ test {setup replication for following tests} {
+ $node_1 replicaof $node_0_host $node_0_port
+ wait_for_sync $node_1
+ }
+
+ test {publish message to master and receive on replica} {
+ set rd0 [redis_deferring_client 0]
+ set rd1 [redis_deferring_client -1]
+
+ assert_equal {1} [ssubscribe $rd1 {chan1}]
+ $rd0 SPUBLISH chan1 hello
+ assert_equal {smessage chan1 hello} [$rd1 read]
+ $rd0 SPUBLISH chan1 world
+ assert_equal {smessage chan1 world} [$rd1 read]
+
+ # clean up clients
+ $rd0 close
+ $rd1 close
+ }
+}
+} \ No newline at end of file
diff --git a/tests/unit/querybuf.tcl b/tests/unit/querybuf.tcl
new file mode 100644
index 0000000..f4859dd
--- /dev/null
+++ b/tests/unit/querybuf.tcl
@@ -0,0 +1,96 @@
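+# Get the idle time in seconds of the named client, parsed from the
+# idle= field of CLIENT LIST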
+proc client_idle_sec {name} {
+ set clients [split [r client list] "\r\n"]
+ set c [lsearch -inline $clients *name=$name*]
+ assert {[regexp {idle=([0-9]+)} $c - idle]}
+ return $idle
+}
+
+# Calculate query buffer memory of client
+proc client_query_buffer {name} {
+ set clients [split [r client list] "\r\n"]
+ set c [lsearch -inline $clients *name=$name*]
+ if {[string length $c] > 0} {
+ assert {[regexp {qbuf=([0-9]+)} $c - qbuf]}
+ assert {[regexp {qbuf-free=([0-9]+)} $c - qbuf_free]}
+ return [expr {$qbuf + $qbuf_free}]
+ }
+ return 0
+}
+
+start_server {tags {"querybuf slow"}} {
+ # increase the execution frequency of clientsCron
+ r config set hz 100
+
+ # The test needs to run for at least 2s in order to check that the
+ # client query buffer is resized once the client has been idle for 2s.
+ test "query buffer resized correctly" {
+ set rd [redis_client]
+ $rd client setname test_client
+ set orig_test_client_qbuf [client_query_buffer test_client]
+ # Make sure the query buffer is smaller than the peak resize threshold (PROTO_RESIZE_THRESHOLD) 32k
+ # but at least as large as the basic IO reading buffer size (PROTO_IOBUF_LEN) 16k
+ assert {$orig_test_client_qbuf >= 16384 && $orig_test_client_qbuf < 32768}
+
+ # Check that the initial query buffer is resized after 2 sec
+ wait_for_condition 1000 10 {
+ [client_idle_sec test_client] >= 3 && [client_query_buffer test_client] == 0
+ } else {
+ fail "query buffer was not resized"
+ }
+ $rd close
+ }
+
+ test "query buffer resized correctly when not idle" {
+ # Pause cron to prevent premature shrinking (timing issue).
+ r debug pause-cron 1
+
+ # Memory will increase by more than 32k due to client query buffer.
+ set rd [redis_client]
+ $rd client setname test_client
+
+ # Create a large query buffer (more than PROTO_RESIZE_THRESHOLD - 32k)
+ $rd set x [string repeat A 400000]
+
+ # Make sure the query buffer is larger than the peak resize threshold (PROTO_RESIZE_THRESHOLD) 32k
+ set orig_test_client_qbuf [client_query_buffer test_client]
+ assert {$orig_test_client_qbuf > 32768}
+
+ r debug pause-cron 0
+
+ # Wait for qbuf to shrink due to lower peak
+ set t [clock milliseconds]
+ while true {
+ # Write something smaller, so query buf peak can shrink
+ $rd set x [string repeat A 100]
+ set new_test_client_qbuf [client_query_buffer test_client]
+ if {$new_test_client_qbuf < $orig_test_client_qbuf} { break }
+ if {[expr [clock milliseconds] - $t] > 1000} { break }
+ after 10
+ }
+ # Validate qbuf shrunk but isn't 0 since we maintain room based on latest peak
+ assert {[client_query_buffer test_client] > 0 && [client_query_buffer test_client] < $orig_test_client_qbuf}
+ $rd close
+ } {0} {needs:debug}
+
+ test "query buffer resized correctly with fat argv" {
+ set rd [redis_client]
+ $rd client setname test_client
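+ # Write an incomplete command: the header declares a 1MB final
+ # argument that is never sent, so the query buffer must grow to hold it.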
+ $rd write "*3\r\n\$3\r\nset\r\n\$1\r\na\r\n\$1000000\r\n"
+ $rd flush
+
+ after 20
+ if {[client_query_buffer test_client] < 1000000} {
+ fail "query buffer should not be resized when client idle time smaller than 2s"
+ }
+
+ # Check that the query buffer is resized after 2 sec
+ wait_for_condition 1000 10 {
+ [client_idle_sec test_client] >= 3 && [client_query_buffer test_client] < 1000000
+ } else {
+ fail "query buffer should be resized when client idle time bigger than 2s"
+ }
+
+ $rd close
+ }
+
+}
diff --git a/tests/unit/quit.tcl b/tests/unit/quit.tcl
new file mode 100644
index 0000000..50ccab1
--- /dev/null
+++ b/tests/unit/quit.tcl
@@ -0,0 +1,33 @@
+start_server {tags {"quit"}} {
+
+ test "QUIT returns OK" {
+ reconnect
+ assert_equal OK [r quit]
+ assert_error * {r ping}
+ }
+
+ test "Pipelined commands after QUIT must not be executed" {
+ reconnect
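+ # The server replies to QUIT and then closes the connection, so the
+ # pipelined SET must never be executed.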
+ r write [format_command quit]
+ r write [format_command set foo bar]
+ r flush
+ assert_equal OK [r read]
+ assert_error * {r read}
+
+ reconnect
+ assert_equal {} [r get foo]
+ }
+
+ test "Pipelined commands after QUIT that exceed read buffer size" {
+ reconnect
+ r write [format_command quit]
+ r write [format_command set foo [string repeat "x" 1024]]
+ r flush
+ assert_equal OK [r read]
+ assert_error * {r read}
+
+ reconnect
+ assert_equal {} [r get foo]
+
+ }
+}
diff --git a/tests/unit/replybufsize.tcl b/tests/unit/replybufsize.tcl
new file mode 100644
index 0000000..933189e
--- /dev/null
+++ b/tests/unit/replybufsize.tcl
@@ -0,0 +1,47 @@
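+# Get the reply buffer size of the named client, parsed from the
+# rbs= field of CLIENT LIST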
+proc get_reply_buffer_size {cname} {
+
+ set clients [split [string trim [r client list]] "\r\n"]
+ set c [lsearch -inline $clients *name=$cname*]
+ if {![regexp rbs=(\[a-zA-Z0-9-\]+) $c - rbufsize]} {
+ error "field rbs not found in $c"
+ }
+ return $rbufsize
+}
+
+start_server {tags {"replybufsize"}} {
+
+ test {verify reply buffer limits} {
+ # In order to reduce test time we can set the peak reset time very low
+ r debug replybuffer peak-reset-time 100
+
+ # Create a simple idle test client
+ variable tc [redis_client]
+ $tc client setname test_client
+
+ # make sure the client stays idle for 1 second so that the reply buffer is shrunk
+ wait_for_condition 10 100 {
+ [get_reply_buffer_size test_client] >= 1024 && [get_reply_buffer_size test_client] < 2046
+ } else {
+ set rbs [get_reply_buffer_size test_client]
+ fail "reply buffer of idle client is $rbs after 1 seconds"
+ }
+
+ r set bigval [string repeat x 32768]
+
+ # Disable peak reset so the recorded peak is kept while we check that the buffer grows
+ r debug replybuffer peak-reset-time never
+
+ wait_for_condition 10 100 {
+ [$tc get bigval ; get_reply_buffer_size test_client] >= 16384 && [get_reply_buffer_size test_client] < 32768
+ } else {
+ set rbs [get_reply_buffer_size test_client]
+ fail "reply buffer of busy client is $rbs after 1 seconds"
+ }
+
+ # Restore the peak reset time to default
+ r debug replybuffer peak-reset-time reset
+
+ $tc close
+ } {0} {needs:debug}
+}
+ \ No newline at end of file
diff --git a/tests/unit/scan.tcl b/tests/unit/scan.tcl
new file mode 100644
index 0000000..d688d7c
--- /dev/null
+++ b/tests/unit/scan.tcl
@@ -0,0 +1,433 @@
+start_server {tags {"scan network"}} {
+ test "SCAN basic" {
+ r flushdb
+ populate 1000
+
+ set cur 0
+ set keys {}
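+ # Standard SCAN loop: feed the returned cursor back into the next
+ # call until it is 0 again. Keys may be returned more than once,
+ # hence the lsort -unique below.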
+ while 1 {
+ set res [r scan $cur]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ set keys [lsort -unique $keys]
+ assert_equal 1000 [llength $keys]
+ }
+
+ test "SCAN COUNT" {
+ r flushdb
+ populate 1000
+
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur count 5]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ set keys [lsort -unique $keys]
+ assert_equal 1000 [llength $keys]
+ }
+
+ test "SCAN MATCH" {
+ r flushdb
+ populate 1000
+
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur match "key:1??"]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ set keys [lsort -unique $keys]
+ assert_equal 100 [llength $keys]
+ }
+
+ test "SCAN TYPE" {
+ r flushdb
+ # populate only creates strings
+ populate 1000
+
+ # Check non-strings are excluded
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type "list"]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 0 [llength $keys]
+
+ # Check strings are included
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type "string"]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 1000 [llength $keys]
+
+ # Check all three args work together
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type "string" match "key:*" count 10]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 1000 [llength $keys]
+ }
+
+ test "SCAN unknown type" {
+ r flushdb
+ # make sure that passive expiration is triggered by the scan
+ r debug set-active-expire 0
+
+ populate 1000
+ r hset hash f v
+ r pexpire hash 1
+
+ after 2
+
+ # TODO: remove this in redis 8.0
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type "string1"]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 0 [llength $keys]
+ # make sure that expired keys have been removed by the scan command
+ assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+
+ # TODO: uncomment in redis 8.0
+ #assert_error "*unknown type name*" {r scan 0 type "string1"}
+ # expired keys will not be touched by the scan command
+ #assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ test "SCAN with expired keys" {
+ r flushdb
+ # make sure that passive expiration is triggered by the scan
+ r debug set-active-expire 0
+
+ populate 1000
+ r set foo bar
+ r pexpire foo 1
+
+ # add a hash type key
+ r hset hash f v
+ r pexpire hash 1
+
+ after 2
+
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur count 10]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 1000 [llength $keys]
+
+ # make sure that expired keys have been removed by the scan command
+ assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ test "SCAN with expired keys with TYPE filter" {
+ r flushdb
+ # make sure that passive expiration is triggered by the scan
+ r debug set-active-expire 0
+
+ populate 1000
+ r set foo bar
+ r pexpire foo 1
+
+ # add a hash type key
+ r hset hash f v
+ r pexpire hash 1
+
+ after 2
+
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur type "string" count 10]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ assert_equal 1000 [llength $keys]
+
+ # make sure that expired keys have been removed by the scan command
+ assert_equal 1000 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+ # TODO: uncomment in redis 8.0
+ # make sure that only expired keys matching the type filter will have been removed by the scan command
+ #assert_equal 1001 [scan [regexp -inline {keys\=([\d]*)} [r info keyspace]] keys=%d]
+
+ r debug set-active-expire 1
+ } {OK} {needs:debug}
+
+ foreach enc {intset listpack hashtable} {
+ test "SSCAN with encoding $enc" {
+ # Create the Set
+ r del set
+ if {$enc eq {intset}} {
+ set prefix ""
+ } else {
+ set prefix "ele:"
+ }
+ set count [expr {$enc eq "hashtable" ? 200 : 100}]
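+ # 100 elements stay within the default intset/listpack limits,
+ # while 200 exceed set-max-listpack-entries (128 by default) and
+ # force the hashtable encoding.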
+ set elements {}
+ for {set j 0} {$j < $count} {incr j} {
+ lappend elements ${prefix}${j}
+ }
+ r sadd set {*}$elements
+
+ # Verify that the encoding matches.
+ assert_encoding $enc set
+
+ # Test SSCAN
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r sscan set $cur]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ set keys [lsort -unique $keys]
+ assert_equal $count [llength $keys]
+ }
+ }
+
+ foreach enc {listpack hashtable} {
+ test "HSCAN with encoding $enc" {
+ # Create the Hash
+ r del hash
+ if {$enc eq {listpack}} {
+ set count 30
+ } else {
+ set count 1000
+ }
+ set elements {}
+ for {set j 0} {$j < $count} {incr j} {
+ lappend elements key:$j $j
+ }
+ r hmset hash {*}$elements
+
+ # Verify that the encoding matches.
+ assert_encoding $enc hash
+
+ # Test HSCAN
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r hscan hash $cur]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ set keys2 {}
+ foreach {k v} $keys {
+ assert {$k eq "key:$v"}
+ lappend keys2 $k
+ }
+
+ set keys2 [lsort -unique $keys2]
+ assert_equal $count [llength $keys2]
+ }
+ }
+
+ foreach enc {listpack skiplist} {
+ test "ZSCAN with encoding $enc" {
+ # Create the Sorted Set
+ r del zset
+ if {$enc eq {listpack}} {
+ set count 30
+ } else {
+ set count 1000
+ }
+ set elements {}
+ for {set j 0} {$j < $count} {incr j} {
+ lappend elements $j key:$j
+ }
+ r zadd zset {*}$elements
+
+ # Verify that the encoding matches.
+ assert_encoding $enc zset
+
+ # Test ZSCAN
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r zscan zset $cur]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ }
+
+ set keys2 {}
+ foreach {k v} $keys {
+ assert {$k eq "key:$v"}
+ lappend keys2 $k
+ }
+
+ set keys2 [lsort -unique $keys2]
+ assert_equal $count [llength $keys2]
+ }
+ }
+
+ test "SCAN guarantees check under write load" {
+ r flushdb
+ populate 100
+
+ # We start scanning here, so keys from 0 to 99 should all be
+ # reported at the end of the iteration.
+ set cur 0
+ set keys {}
+ while 1 {
+ set res [r scan $cur]
+ set cur [lindex $res 0]
+ set k [lindex $res 1]
+ lappend keys {*}$k
+ if {$cur == 0} break
+ # Write 10 random keys at every SCAN iteration.
+ for {set j 0} {$j < 10} {incr j} {
+ r set addedkey:[randomInt 1000] foo
+ }
+ }
+
+ set keys2 {}
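+ # Keys written during the iteration may or may not be reported:
+ # filter out the longer addedkey:* names and verify that all 100
+ # preexisting keys were returned.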
+ foreach k $keys {
+ if {[string length $k] > 6} continue
+ lappend keys2 $k
+ }
+
+ set keys2 [lsort -unique $keys2]
+ assert_equal 100 [llength $keys2]
+ }
+
+ test "SSCAN with integer encoded object (issue #1345)" {
+ set objects {1 a}
+ r del set
+ r sadd set {*}$objects
+ set res [r sscan set 0 MATCH *a* COUNT 100]
+ assert_equal [lsort -unique [lindex $res 1]] {a}
+ set res [r sscan set 0 MATCH *1* COUNT 100]
+ assert_equal [lsort -unique [lindex $res 1]] {1}
+ }
+
+ test "SSCAN with PATTERN" {
+ r del mykey
+ r sadd mykey foo fab fiz foobar 1 2 3 4
+ set res [r sscan mykey 0 MATCH foo* COUNT 10000]
+ lsort -unique [lindex $res 1]
+ } {foo foobar}
+
+ test "HSCAN with PATTERN" {
+ r del mykey
+ r hmset mykey foo 1 fab 2 fiz 3 foobar 10 1 a 2 b 3 c 4 d
+ set res [r hscan mykey 0 MATCH foo* COUNT 10000]
+ lsort -unique [lindex $res 1]
+ } {1 10 foo foobar}
+
+ test "ZSCAN with PATTERN" {
+ r del mykey
+ r zadd mykey 1 foo 2 fab 3 fiz 10 foobar
+ set res [r zscan mykey 0 MATCH foo* COUNT 10000]
+ lsort -unique [lindex $res 1]
+ } {1 10 foo foobar}
+
+ test "ZSCAN scores: regression test for issue #2175" {
+ r del mykey
+ for {set j 0} {$j < 500} {incr j} {
+ r zadd mykey 9.8813129168249309e-323 $j
+ }
+ set res [lindex [r zscan mykey 0] 1]
+ set first_score [lindex $res 1]
+ assert {$first_score != 0}
+ }
+
+ test "SCAN regression test for issue #4906" {
+ for {set k 0} {$k < 100} {incr k} {
+ r del set
+ r sadd set x; # Make sure it's not intset encoded
+ set toremove {}
+ unset -nocomplain found
+ array set found {}
+
+ # Populate the set
+ set numele [expr {101+[randomInt 1000]}]
+ for {set j 0} {$j < $numele} {incr j} {
+ r sadd set $j
+ if {$j >= 100} {
+ lappend toremove $j
+ }
+ }
+
+ # Start scanning
+ set cursor 0
+ set iteration 0
+ set del_iteration [randomInt 10]
+ while {!($cursor == 0 && $iteration != 0)} {
+ lassign [r sscan set $cursor] cursor items
+
+ # Mark found items. We expect to find from 0 to 99 at the end
+ # since those elements will never be removed during the scanning.
+ foreach i $items {
+ set found($i) 1
+ }
+ incr iteration
+ # At some point remove most of the items to trigger the
+ # rehashing to a smaller hash table.
+ if {$iteration == $del_iteration} {
+ r srem set {*}$toremove
+ }
+ }
+
+ # Verify that SSCAN reported everything from 0 to 99
+ for {set j 0} {$j < 100} {incr j} {
+ if {![info exists found($j)]} {
+ fail "SSCAN element missing $j"
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
new file mode 100644
index 0000000..c2f79a7
--- /dev/null
+++ b/tests/unit/scripting.tcl
@@ -0,0 +1,2213 @@
+foreach is_eval {0 1} {
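+# Run the whole suite twice: once implementing run_script via Redis
+# Functions (is_eval == 0) and once via plain EVAL (is_eval == 1), so
+# that the same test bodies exercise both script execution paths.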
+
+if {$is_eval == 1} {
+ proc run_script {args} {
+ r eval {*}$args
+ }
+ proc run_script_ro {args} {
+ r eval_ro {*}$args
+ }
+ proc run_script_on_connection {args} {
+ [lindex $args 0] eval {*}[lrange $args 1 end]
+ }
+ proc kill_script {args} {
+ r script kill
+ }
+} else {
+ proc run_script {args} {
+ r function load replace [format "#!lua name=test\nredis.register_function('test', function(KEYS, ARGV)\n %s \nend)" [lindex $args 0]]
+ if {[r readingraw] eq 1} {
+ # read name
+ assert_equal {test} [r read]
+ }
+ r fcall test {*}[lrange $args 1 end]
+ }
+ proc run_script_ro {args} {
+ r function load replace [format "#!lua name=test\nredis.register_function{function_name='test', callback=function(KEYS, ARGV)\n %s \nend, flags={'no-writes'}}" [lindex $args 0]]
+ if {[r readingraw] eq 1} {
+ # read name
+ assert_equal {test} [r read]
+ }
+ r fcall_ro test {*}[lrange $args 1 end]
+ }
+ proc run_script_on_connection {args} {
+ set rd [lindex $args 0]
+ $rd function load replace [format "#!lua name=test\nredis.register_function('test', function(KEYS, ARGV)\n %s \nend)" [lindex $args 1]]
+ # read name
+ $rd read
+ $rd fcall test {*}[lrange $args 2 end]
+ }
+ proc kill_script {args} {
+ r function kill
+ }
+}
+
+start_server {tags {"scripting"}} {
+
+ if {$is_eval eq 1} {
+ test {Script - disallow write on OOM} {
+ r config set maxmemory 1
+
+ catch {[r eval "redis.call('set', 'x', 1)" 0]} e
+ assert_match {*command not allowed when used memory*} $e
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+ } ;# is_eval
+
+ test {EVAL - Does the Lua interpreter reply to our requests?} {
+ run_script {return 'hello'} 0
+ } {hello}
+
+ test {EVAL - Return _G} {
+ run_script {return _G} 0
+ } {}
+
+ test {EVAL - Return table with a metatable that raise error} {
+ run_script {local a = {}; setmetatable(a,{__index=function() foo() end}) return a} 0
+ } {}
+
+ test {EVAL - Return table with a metatable that call redis} {
+ run_script {local a = {}; setmetatable(a,{__index=function() redis.call('set', 'x', '1') end}) return a} 1 x
+ # make sure x was not set
+ r get x
+ } {}
+
+ test {EVAL - Lua integer -> Redis protocol type conversion} {
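+ # Lua numbers are doubles; converting to a Redis integer reply
+ # truncates the fractional part, so 100.5 becomes 100.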
+ run_script {return 100.5} 0
+ } {100}
+
+ test {EVAL - Lua string -> Redis protocol type conversion} {
+ run_script {return 'hello world'} 0
+ } {hello world}
+
+ test {EVAL - Lua true boolean -> Redis protocol type conversion} {
+ run_script {return true} 0
+ } {1}
+
+ test {EVAL - Lua false boolean -> Redis protocol type conversion} {
+ run_script {return false} 0
+ } {}
+
+ test {EVAL - Lua status code reply -> Redis protocol type conversion} {
+ run_script {return {ok='fine'}} 0
+ } {fine}
+
+ test {EVAL - Lua error reply -> Redis protocol type conversion} {
+ catch {
+ run_script {return {err='ERR this is an error'}} 0
+ } e
+ set _ $e
+ } {ERR this is an error}
+
+ test {EVAL - Lua table -> Redis protocol type conversion} {
+ run_script {return {1,2,3,'ciao',{1,2}}} 0
+ } {1 2 3 ciao {1 2}}
+
+ test {EVAL - Are the KEYS and ARGV arrays populated correctly?} {
+ run_script {return {KEYS[1],KEYS[2],ARGV[1],ARGV[2]}} 2 a{t} b{t} c{t} d{t}
+ } {a{t} b{t} c{t} d{t}}
+
+ test {EVAL - is Lua able to call the Redis API?} {
+ r set mykey myval
+ run_script {return redis.call('get',KEYS[1])} 1 mykey
+ } {myval}
+
+ if {$is_eval eq 1} {
+ # eval sha is only relevant for is_eval Lua
+ test {EVALSHA - Can we call a SHA1 if already defined?} {
+ r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey
+ } {myval}
+
+ test {EVALSHA_RO - Can we call a SHA1 if already defined?} {
+ r evalsha_ro fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey
+ } {myval}
+
+ test {EVALSHA - Can we call a SHA1 in uppercase?} {
+ r evalsha FD758D1589D044DD850A6F05D52F2EEFD27F033F 1 mykey
+ } {myval}
+
+ test {EVALSHA - Do we get an error on invalid SHA1?} {
+ catch {r evalsha NotValidShaSUM 0} e
+ set _ $e
+ } {NOSCRIPT*}
+
+ test {EVALSHA - Do we get an error on non defined SHA1?} {
+ catch {r evalsha ffd632c7d33e571e9f24556ebed26c3479a87130 0} e
+ set _ $e
+ } {NOSCRIPT*}
+ } ;# is_eval
+
+ test {EVAL - Redis integer -> Lua type conversion} {
+ r set x 0
+ run_script {
+ local foo = redis.pcall('incr',KEYS[1])
+ return {type(foo),foo}
+ } 1 x
+ } {number 1}
+
+ test {EVAL - Redis bulk -> Lua type conversion} {
+ r set mykey myval
+ run_script {
+ local foo = redis.pcall('get',KEYS[1])
+ return {type(foo),foo}
+ } 1 mykey
+ } {string myval}
+
+ test {EVAL - Redis multi bulk -> Lua type conversion} {
+ r del mylist
+ r rpush mylist a
+ r rpush mylist b
+ r rpush mylist c
+ run_script {
+ local foo = redis.pcall('lrange',KEYS[1],0,-1)
+ return {type(foo),foo[1],foo[2],foo[3],# foo}
+ } 1 mylist
+ } {table a b c 3}
+
+ test {EVAL - Redis status reply -> Lua type conversion} {
+ run_script {
+ local foo = redis.pcall('set',KEYS[1],'myval')
+ return {type(foo),foo['ok']}
+ } 1 mykey
+ } {table OK}
+
+ test {EVAL - Redis error reply -> Lua type conversion} {
+ r set mykey myval
+ run_script {
+ local foo = redis.pcall('incr',KEYS[1])
+ return {type(foo),foo['err']}
+ } 1 mykey
+ } {table {ERR value is not an integer or out of range}}
+
+ test {EVAL - Redis nil bulk reply -> Lua type conversion} {
+ r del mykey
+ run_script {
+ local foo = redis.pcall('get',KEYS[1])
+ return {type(foo),foo == false}
+ } 1 mykey
+ } {boolean 1}
+
+ test {EVAL - Is the Lua client using the currently selected DB?} {
+ r set mykey "this is DB 9"
+ r select 10
+ r set mykey "this is DB 10"
+ run_script {return redis.pcall('get',KEYS[1])} 1 mykey
+ } {this is DB 10} {singledb:skip}
+
+ test {EVAL - SELECT inside Lua should not affect the caller} {
+ # here DB 10 is selected
+ r set mykey "original value"
+ run_script {return redis.pcall('select','9')} 0
+ set res [r get mykey]
+ r select 9
+ set res
+ } {original value} {singledb:skip}
+
+ if 0 {
+ test {EVAL - Script can't run more than configured time limit} {
+ r config set lua-time-limit 1
+ catch {
+ run_script {
+ local i = 0
+ while true do i=i+1 end
+ } 0
+ } e
+ set _ $e
+ } {*execution time*}
+ }
+
+ test {EVAL - Scripts do not block on blpop command} {
+ r lpush l 1
+ r lpop l
+ run_script {return redis.pcall('blpop','l',0)} 1 l
+ } {}
+
+ test {EVAL - Scripts do not block on brpop command} {
+ r lpush l 1
+ r lpop l
+ run_script {return redis.pcall('brpop','l',0)} 1 l
+ } {}
+
+ test {EVAL - Scripts do not block on brpoplpush command} {
+ r lpush empty_list1{t} 1
+ r lpop empty_list1{t}
+ run_script {return redis.pcall('brpoplpush','empty_list1{t}', 'empty_list2{t}',0)} 2 empty_list1{t} empty_list2{t}
+ } {}
+
+ test {EVAL - Scripts do not block on blmove command} {
+ r lpush empty_list1{t} 1
+ r lpop empty_list1{t}
+ run_script {return redis.pcall('blmove','empty_list1{t}', 'empty_list2{t}', 'LEFT', 'LEFT', 0)} 2 empty_list1{t} empty_list2{t}
+ } {}
+
+ test {EVAL - Scripts do not block on bzpopmin command} {
+ r zadd empty_zset 10 foo
+ r zmpop 1 empty_zset MIN
+ run_script {return redis.pcall('bzpopmin','empty_zset', 0)} 1 empty_zset
+ } {}
+
+ test {EVAL - Scripts do not block on bzpopmax command} {
+ r zadd empty_zset 10 foo
+ r zmpop 1 empty_zset MIN
+ run_script {return redis.pcall('bzpopmax','empty_zset', 0)} 1 empty_zset
+ } {}
+
+ test {EVAL - Scripts do not block on wait} {
+ run_script {return redis.pcall('wait','1','0')} 0
+ } {0}
+
+ test {EVAL - Scripts can't run XREAD and XREADGROUP with BLOCK option} {
+ r del s
+ r xgroup create s g $ MKSTREAM
+ set res [run_script {return redis.pcall('xread','STREAMS','s','$')} 1 s]
+ assert {$res eq {}}
+ assert_error "*xread command is not allowed with BLOCK option from scripts" {run_script {return redis.pcall('xread','BLOCK',0,'STREAMS','s','$')} 1 s}
+ set res [run_script {return redis.pcall('xreadgroup','group','g','c','STREAMS','s','>')} 1 s]
+ assert {$res eq {}}
+ assert_error "*xreadgroup command is not allowed with BLOCK option from scripts" {run_script {return redis.pcall('xreadgroup','group','g','c','BLOCK',0,'STREAMS','s','>')} 1 s}
+ }
+
+ test {EVAL - Scripts can run non-deterministic commands} {
+ set e {}
+ catch {
+ run_script {redis.pcall('randomkey'); return redis.pcall('set','x','ciao')} 1 x
+ } e
+ set e
+ } {*OK*}
+
+ test {EVAL - No arguments to redis.call/pcall is considered an error} {
+ set e {}
+ catch {run_script {return redis.call()} 0} e
+ set e
+ } {*one argument*}
+
+ test {EVAL - redis.call variant raises a Lua error on Redis cmd error (1)} {
+ set e {}
+ catch {
+ run_script "redis.call('nosuchcommand')" 0
+ } e
+ set e
+ } {*Unknown Redis*}
+
+ test {EVAL - redis.call variant raises a Lua error on Redis cmd error (2)} {
+ set e {}
+ catch {
+ run_script "redis.call('get','a','b','c')" 0
+ } e
+ set e
+ } {*number of args*}
+
+ test {EVAL - redis.call variant raises a Lua error on Redis cmd error (3)} {
+ set e {}
+ r set foo bar
+ catch {
+ run_script {redis.call('lpush',KEYS[1],'val')} 1 foo
+ } e
+ set e
+ } {*against a key*}
+
+ test {EVAL - JSON string encoding a string larger than 2GB} {
+ run_script {
+ local s = string.rep("a", 1024 * 1024 * 1024)
+ return #cjson.encode(s..s..s)
+ } 0
+ } {3221225474} {large-memory} ;# length includes two double quotes at both ends
+
+ test {EVAL - JSON numeric decoding} {
+ # We must return the table as a string because otherwise
+ # Redis converts floats to ints and we get 0 and 1023 instead
+ # of 0.0003 and 1023.2 as the parsed output.
+ run_script {return
+ table.concat(
+ cjson.decode(
+ "[0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10]"), " ")
+ } 0
+ } {0 -5000 -1 0.0003 1023.2 0}
+
+ test {EVAL - JSON string decoding} {
+ run_script {local decoded = cjson.decode('{"keya": "a", "keyb": "b"}')
+ return {decoded.keya, decoded.keyb}
+ } 0
+ } {a b}
+
+ test {EVAL - JSON smoke test} {
+ run_script {
+ local some_map = {
+ s1="Some string",
+ n1=100,
+ a1={"Some","String","Array"},
+ nil1=nil,
+ b1=true,
+ b2=false}
+ local encoded = cjson.encode(some_map)
+ local decoded = cjson.decode(encoded)
+ assert(table.concat(some_map) == table.concat(decoded))
+
+ cjson.encode_keep_buffer(false)
+ encoded = cjson.encode(some_map)
+ decoded = cjson.decode(encoded)
+ assert(table.concat(some_map) == table.concat(decoded))
+
+ -- Table with numeric keys
+ local table1 = {one="one", [1]="one"}
+ encoded = cjson.encode(table1)
+ decoded = cjson.decode(encoded)
+ assert(decoded["one"] == table1["one"])
+ assert(decoded["1"] == table1[1])
+
+ -- Array
+ local array1 = {[1]="one", [2]="two"}
+ encoded = cjson.encode(array1)
+ decoded = cjson.decode(encoded)
+ assert(table.concat(array1) == table.concat(decoded))
+
+ -- Invalid keys
+ local invalid_map = {}
+ invalid_map[false] = "false"
+ local ok, encoded = pcall(cjson.encode, invalid_map)
+ assert(ok == false)
+
+ -- Max depth
+ cjson.encode_max_depth(1)
+ ok, encoded = pcall(cjson.encode, some_map)
+ assert(ok == false)
+
+ cjson.decode_max_depth(1)
+ ok, decoded = pcall(cjson.decode, '{"obj": {"array": [1,2,3,4]}}')
+ assert(ok == false)
+
+ -- Invalid numbers
+ ok, encoded = pcall(cjson.encode, {num1=0/0})
+ assert(ok == false)
+ cjson.encode_invalid_numbers(true)
+ ok, encoded = pcall(cjson.encode, {num1=0/0})
+ assert(ok == true)
+
+ -- Restore defaults
+ cjson.decode_max_depth(1000)
+ cjson.encode_max_depth(1000)
+ cjson.encode_invalid_numbers(false)
+ } 0
+ }
+
+ test {EVAL - cmsgpack can pack double?} {
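+ # MessagePack encodes a double as the 0xcb marker followed by the
+ # 8 IEEE-754 big-endian bytes, hence the expected hex string below.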
+ run_script {local encoded = cmsgpack.pack(0.1)
+ local h = ""
+ for i = 1, #encoded do
+ h = h .. string.format("%02x",string.byte(encoded,i))
+ end
+ return h
+ } 0
+ } {cb3fb999999999999a}
+
+ test {EVAL - cmsgpack can pack negative int64?} {
+ run_script {local encoded = cmsgpack.pack(-1099511627776)
+ local h = ""
+ for i = 1, #encoded do
+ h = h .. string.format("%02x",string.byte(encoded,i))
+ end
+ return h
+ } 0
+ } {d3ffffff0000000000}
+
+ test {EVAL - cmsgpack pack/unpack smoke test} {
+ run_script {
+ local str_lt_32 = string.rep("x", 30)
+ local str_lt_255 = string.rep("x", 250)
+ local str_lt_65535 = string.rep("x", 65530)
+ local str_long = string.rep("x", 100000)
+ local array_lt_15 = {1, 2, 3, 4, 5}
+ local array_lt_65535 = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}
+ local array_big = {}
+ for i=1, 100000 do
+ array_big[i] = i
+ end
+ local map_lt_15 = {a=1, b=2}
+ local map_big = {}
+ for i=1, 100000 do
+ map_big[tostring(i)] = i
+ end
+ local some_map = {
+ s1=str_lt_32,
+ s2=str_lt_255,
+ s3=str_lt_65535,
+ s4=str_long,
+ d1=0.1,
+ i1=1,
+ i2=250,
+ i3=65530,
+ i4=100000,
+ i5=2^40,
+ i6=-1,
+ i7=-120,
+ i8=-32000,
+ i9=-100000,
+ i10=-3147483648,
+ a1=array_lt_15,
+ a2=array_lt_65535,
+ a3=array_big,
+ m1=map_lt_15,
+ m2=map_big,
+ b1=false,
+ b2=true,
+ n=nil
+ }
+ local encoded = cmsgpack.pack(some_map)
+ local decoded = cmsgpack.unpack(encoded)
+ assert(table.concat(some_map) == table.concat(decoded))
+ local offset, decoded_one = cmsgpack.unpack_one(encoded, 0)
+ assert(table.concat(some_map) == table.concat(decoded_one))
+ assert(offset == -1)
+
+ local encoded_multiple = cmsgpack.pack(str_lt_32, str_lt_255, str_lt_65535, str_long)
+ local offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, 0)
+ assert(obj == str_lt_32)
+ offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, offset)
+ assert(obj == str_lt_255)
+ offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, offset)
+ assert(obj == str_lt_65535)
+ offset, obj = cmsgpack.unpack_limit(encoded_multiple, 1, offset)
+ assert(obj == str_long)
+ assert(offset == -1)
+ } 0
+ }
+
+ test {EVAL - cmsgpack can pack and unpack circular references?} {
+ run_script {local a = {x=nil,y=5}
+ local b = {x=a}
+ a['x'] = b
+ local encoded = cmsgpack.pack(a)
+ local h = ""
+ -- cmsgpack encodes to a depth of 16, but can't encode
+ -- references, so the encoded object has a deep copy recursive
+ -- depth of 16.
+ for i = 1, #encoded do
+ h = h .. string.format("%02x",string.byte(encoded,i))
+ end
+ -- when unpacked, re.x.x != re because the unpack creates
+ -- individual tables down to a depth of 16.
+ -- (that's why the encoded output is so large)
+ local re = cmsgpack.unpack(encoded)
+ assert(re)
+ assert(re.x)
+ assert(re.x.x.y == re.y)
+ assert(re.x.x.x.x.y == re.y)
+ assert(re.x.x.x.x.x.x.y == re.y)
+ assert(re.x.x.x.x.x.x.x.x.x.x.y == re.y)
+ -- maximum working depth:
+ assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.y == re.y)
+ -- now the last x would be b above and has no y
+ assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x)
+ -- so, the final x.x is at the depth limit and was assigned nil
+ assert(re.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x.x == nil)
+ return {h, re.x.x.x.x.x.x.x.x.y == re.y, re.y == 5}
+ } 0
+ } {82a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a17882a17905a17881a178c0 1 1}
+
+ test {EVAL - Numerical sanity check from bitop} {
+ run_script {assert(0x7fffffff == 2147483647, "broken hex literals");
+ assert(0xffffffff == -1 or 0xffffffff == 2^32-1,
+ "broken hex literals");
+ assert(tostring(-1) == "-1", "broken tostring()");
+ assert(tostring(0xffffffff) == "-1" or
+ tostring(0xffffffff) == "4294967295",
+ "broken tostring()")
+ } 0
+ } {}
+
+ test {EVAL - Verify minimal bitop functionality} {
+ run_script {assert(bit.tobit(1) == 1);
+ assert(bit.band(1) == 1);
+ assert(bit.bxor(1,2) == 3);
+ assert(bit.bor(1,2,4,8,16,32,64,128) == 255)
+ } 0
+ } {}
+
+ test {EVAL - Able to parse trailing comments} {
+ run_script {return 'hello' --trailing comment} 0
+ } {hello}
+
+ test {EVAL_RO - Successful case} {
+ r set foo bar
+ assert_equal bar [run_script_ro {return redis.call('get', KEYS[1]);} 1 foo]
+ }
+
+ test {EVAL_RO - Cannot run write commands} {
+ r set foo bar
+ catch {run_script_ro {redis.call('del', KEYS[1]);} 1 foo} e
+ set e
+ } {ERR Write commands are not allowed from read-only scripts*}
+
+ if {$is_eval eq 1} {
+ # script command is only relevant for is_eval Lua
+ test {SCRIPTING FLUSH - is able to clear the scripts cache?} {
+ r set mykey myval
+ set v [r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey]
+ assert_equal $v myval
+ set e ""
+ r script flush
+ catch {r evalsha fd758d1589d044dd850a6f05d52f2eefd27f033f 1 mykey} e
+ set e
+ } {NOSCRIPT*}
+
+ test {SCRIPTING FLUSH ASYNC} {
+ for {set j 0} {$j < 100} {incr j} {
+ r script load "return $j"
+ }
+ assert { [string match "*number_of_cached_scripts:100*" [r info Memory]] }
+ r script flush async
+ assert { [string match "*number_of_cached_scripts:0*" [r info Memory]] }
+ }
+
+ test {SCRIPT EXISTS - can detect already defined scripts?} {
+ r eval "return 1+1" 0
+ r script exists a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bd9 a27e7e8a43702b7046d4f6a7ccf5b60cef6b9bda
+ } {1 0}
+
+ test {SCRIPT LOAD - is able to register scripts in the scripting cache} {
+ list \
+ [r script load "return 'loaded'"] \
+ [r evalsha b534286061d4b9e4026607613b95c06c06015ae8 0]
+ } {b534286061d4b9e4026607613b95c06c06015ae8 loaded}
+
+ test "SORT is normally not alpha re-ordered for the scripting engine" {
+ r del myset
+ r sadd myset 1 2 3 4 10
+ r eval {return redis.call('sort',KEYS[1],'desc')} 1 myset
+ } {10 4 3 2 1} {cluster:skip}
+
+ test "SORT BY <constant> output gets ordered for scripting" {
+ r del myset
+ r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz
+ r eval {return redis.call('sort',KEYS[1],'by','_')} 1 myset
+ } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} {cluster:skip}
+
+ test "SORT BY <constant> with GET gets ordered for scripting" {
+ r del myset
+ r sadd myset a b c
+ r eval {return redis.call('sort',KEYS[1],'by','_','get','#','get','_:*')} 1 myset
+ } {a {} b {} c {}} {cluster:skip}
+ } ;# is_eval
+
+ test "redis.sha1hex() implementation" {
+ list [run_script {return redis.sha1hex('')} 0] \
+ [run_script {return redis.sha1hex('Pizza & Mandolino')} 0]
+ } {da39a3ee5e6b4b0d3255bfef95601890afd80709 74822d82031af7493c20eefa13bd07ec4fada82f}
+
+ test {Globals protection reading an undeclared global variable} {
+ catch {run_script {return a} 0} e
+ set e
+ } {ERR *attempted to access * global*}
+
+ test {Globals protection setting an undeclared global variable} {
+ catch {run_script {a=10} 0} e
+ set e
+ } {ERR *Attempt to modify a readonly table*}
+
+ test {Test an example script DECR_IF_GT} {
+ set decr_if_gt {
+ local current
+
+ current = redis.call('get',KEYS[1])
+ if not current then return nil end
+ if current > ARGV[1] then
+ return redis.call('decr',KEYS[1])
+ else
+ return redis.call('get',KEYS[1])
+ end
+ }
+ r set foo 5
+ set res {}
+ lappend res [run_script $decr_if_gt 1 foo 2]
+ lappend res [run_script $decr_if_gt 1 foo 2]
+ lappend res [run_script $decr_if_gt 1 foo 2]
+ lappend res [run_script $decr_if_gt 1 foo 2]
+ lappend res [run_script $decr_if_gt 1 foo 2]
+ set res
+ } {4 3 2 2 2}
+
+ if {$is_eval eq 1} {
+ # random handling is only relevant for is_eval Lua
+ test {random numbers are random now} {
+ set rand1 [r eval {return tostring(math.random())} 0]
+ wait_for_condition 100 1 {
+ $rand1 ne [r eval {return tostring(math.random())} 0]
+ } else {
+ fail "random numbers should be random, now it's fixed value"
+ }
+ }
+
+ test {Scripting engine PRNG can be seeded correctly} {
+ set rand1 [r eval {
+ math.randomseed(ARGV[1]); return tostring(math.random())
+ } 0 10]
+ set rand2 [r eval {
+ math.randomseed(ARGV[1]); return tostring(math.random())
+ } 0 10]
+ set rand3 [r eval {
+ math.randomseed(ARGV[1]); return tostring(math.random())
+ } 0 20]
+ assert_equal $rand1 $rand2
+ assert {$rand2 ne $rand3}
+ }
+ } ;# is_eval
+
+ test {EVAL does not leak in the Lua stack} {
+ r script flush ;# reset Lua VM
+ r set x 0
+ # Use a non blocking client to speedup the loop.
+ set rd [redis_deferring_client]
+ for {set j 0} {$j < 10000} {incr j} {
+ run_script_on_connection $rd {return redis.call("incr",KEYS[1])} 1 x
+ }
+ for {set j 0} {$j < 10000} {incr j} {
+ $rd read
+ }
+ assert {[s used_memory_lua] < 1024*100}
+ $rd close
+ r get x
+ } {10000}
+
+ if {$is_eval eq 1} {
+ test {SPOP: We can call scripts rewriting client->argv from Lua} {
+ set repl [attach_to_replication_stream]
+ # this sadd operation is for the external-cluster test. If myset doesn't exist, 'del myset' won't get propagated.
+ r sadd myset ppp
+ r del myset
+ r sadd myset a b c
+ assert {[r eval {return redis.call('spop', 'myset')} 0] ne {}}
+ assert {[r eval {return redis.call('spop', 'myset', 1)} 0] ne {}}
+ assert {[r eval {return redis.call('spop', KEYS[1])} 1 myset] ne {}}
+ # this one below should not be replicated
+ assert {[r eval {return redis.call('spop', KEYS[1])} 1 myset] eq {}}
+ r set trailingkey 1
+ assert_replication_stream $repl {
+ {select *}
+ {sadd *}
+ {del *}
+ {sadd *}
+ {srem myset *}
+ {srem myset *}
+ {srem myset *}
+ {set *}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MGET: mget shouldn't be propagated in Lua} {
+ set repl [attach_to_replication_stream]
+ r mset a{t} 1 b{t} 2 c{t} 3 d{t} 4
+ #read-only, won't be replicated
+ assert {[r eval {return redis.call('mget', 'a{t}', 'b{t}', 'c{t}', 'd{t}')} 0] eq {1 2 3 4}}
+ r set trailingkey 2
+ assert_replication_stream $repl {
+ {select *}
+ {mset *}
+ {set *}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {EXPIRE: We can call scripts rewriting client->argv from Lua} {
+ set repl [attach_to_replication_stream]
+ r set expirekey 1
+ # should be replicated as PEXPIREAT
+ assert {[r eval {return redis.call('expire', KEYS[1], ARGV[1])} 1 expirekey 3] eq 1}
+
+ assert_replication_stream $repl {
+ {select *}
+ {set *}
+ {pexpireat expirekey *}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {INCRBYFLOAT: We can call scripts expanding client->argv from Lua} {
+ # coverage for scripts calling commands that expand the argv array
+ # an attempt to add coverage for a possible bug in luaArgsToRedisArgv
+ # this test needs a fresh server so that lua_argv_size is 0.
+ # glibc realloc can return the same pointer even when the size changes
+ # still this test isn't able to trigger the issue, but we keep it anyway.
+ start_server {tags {"scripting"}} {
+ set repl [attach_to_replication_stream]
+ # a command with 5 arguments
+ r eval {redis.call('hmget', KEYS[1], 1, 2, 3)} 1 key
+ # then a command with 3 that is replicated as one with 4
+ r eval {redis.call('incrbyfloat', KEYS[1], 1)} 1 key
+ # then a command with 4 args
+ r eval {redis.call('set', KEYS[1], '1', 'KEEPTTL')} 1 key
+
+ assert_replication_stream $repl {
+ {select *}
+ {set key 1 KEEPTTL}
+ {set key 1 KEEPTTL}
+ }
+ close_replication_stream $repl
+ }
+ } {} {needs:repl}
+
+ } ;# is_eval
+
+ test {Call Redis command with many args from Lua (issue #1764)} {
+ run_script {
+ local i
+ local x={}
+ redis.call('del','mylist')
+ for i=1,100 do
+ table.insert(x,i)
+ end
+ redis.call('rpush','mylist',unpack(x))
+ return redis.call('lrange','mylist',0,-1)
+ } 1 mylist
+ } {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100}
+
+ test {Number conversion precision test (issue #1118)} {
+ run_script {
+ local value = 9007199254740991
+ redis.call("set","foo",value)
+ return redis.call("get","foo")
+ } 1 foo
+ } {9007199254740991}
+
+ test {String containing number precision test (regression of issue #1118)} {
+ run_script {
+ redis.call("set", "key", "12039611435714932082")
+ return redis.call("get", "key")
+ } 1 key
+ } {12039611435714932082}
+
+ test {Verify negative arg count is error instead of crash (issue #1842)} {
+ catch { run_script { return "hello" } -12 } e
+ set e
+ } {ERR Number of keys can't be negative}
+
+ test {Scripts can handle commands with incorrect arity} {
+ assert_error "ERR Wrong number of args calling Redis command from script*" {run_script "redis.call('set','invalid')" 0}
+ assert_error "ERR Wrong number of args calling Redis command from script*" {run_script "redis.call('incr')" 0}
+ }
+
+ test {Correct handling of reused argv (issue #1939)} {
+ run_script {
+ for i = 0, 10 do
+ redis.call('SET', 'a{t}', '1')
+ redis.call('MGET', 'a{t}', 'b{t}', 'c{t}')
+ redis.call('EXPIRE', 'a{t}', 0)
+ redis.call('GET', 'a{t}')
+ redis.call('MGET', 'a{t}', 'b{t}', 'c{t}')
+ end
+ } 3 a{t} b{t} c{t}
+ }
+
+ test {Functions in the Redis namespace are able to report errors} {
+ catch {
+ run_script {
+ redis.sha1hex()
+ } 0
+ } e
+ set e
+ } {*wrong number*}
+
+ test {CLUSTER RESET cannot be invoked from within a script} {
+ catch {
+ run_script {
+ redis.call('cluster', 'reset', 'hard')
+ } 0
+ } e
+ set _ $e
+ } {*command is not allowed*}
+
+ test {Script with RESP3 map} {
+ set expected_dict [dict create field value]
+ set expected_list [list field value]
+
+ # Sanity test for RESP3 without scripts
+ r HELLO 3
+ r hset hash field value
+ set res [r hgetall hash]
+ assert_equal $res $expected_dict
+
+ # Test RESP3 client with script in both RESP2 and RESP3 modes
+ set res [run_script {redis.setresp(3); return redis.call('hgetall', KEYS[1])} 1 hash]
+ assert_equal $res $expected_dict
+ set res [run_script {redis.setresp(2); return redis.call('hgetall', KEYS[1])} 1 hash]
+ assert_equal $res $expected_list
+
+ # Test RESP2 client with script in both RESP2 and RESP3 modes
+ r HELLO 2
+ set res [run_script {redis.setresp(3); return redis.call('hgetall', KEYS[1])} 1 hash]
+ assert_equal $res $expected_list
+ set res [run_script {redis.setresp(2); return redis.call('hgetall', KEYS[1])} 1 hash]
+ assert_equal $res $expected_list
+ } {} {resp3}
+
+ if {!$::log_req_res} { # this test creates a huge nested array which python can't handle (RecursionError: maximum recursion depth exceeded in comparison)
+ test {Script return recursive object} {
+ r readraw 1
+ set res [run_script {local a = {}; local b = {a}; a[1] = b; return a} 0]
+ # drain the response
+ while {true} {
+ if {$res == "-ERR reached lua stack limit"} {
+ break
+ }
+ assert_equal $res "*1"
+ set res [r read]
+ }
+ r readraw 0
+ # make sure the connection is still valid
+ assert_equal [r ping] {PONG}
+ }
+ }
+
+ test {Script check unpack with massive arguments} {
+ run_script {
+ local a = {}
+ for i=1,7999 do
+ a[i] = 1
+ end
+ return redis.call("lpush", "l", unpack(a))
+ } 1 l
+ } {7999}
+
+ test "Script read key with expiration set" {
+ r SET key value EX 10
+ assert_equal [run_script {
+ if redis.call("EXISTS", "key") then
+ return redis.call("GET", "key")
+ else
+ return redis.call("EXISTS", "key")
+ end
+ } 1 key] "value"
+ }
+
+ test "Script del key with expiration set" {
+ r SET key value EX 10
+ assert_equal [run_script {
+ redis.call("DEL", "key")
+ return redis.call("EXISTS", "key")
+ } 1 key] 0
+ }
+
+ test "Script ACL check" {
+ r acl setuser bob on {>123} {+@scripting} {+set} {~x*}
+ assert_equal [r auth bob 123] {OK}
+
+ # Check permission granted
+ assert_equal [run_script {
+ return redis.acl_check_cmd('set','xx',1)
+ } 1 xx] 1
+
+ # Check permission denied unauthorised command
+ assert_equal [run_script {
+ return redis.acl_check_cmd('hset','xx','f',1)
+ } 1 xx] {}
+
+ # Check permission denied unauthorised key
+ # Note: we don't pass the "yy" key as an argument to the script so key acl checks won't block the script
+ assert_equal [run_script {
+ return redis.acl_check_cmd('set','yy',1)
+ } 0] {}
+
+ # Check error due to invalid command
+ assert_error {ERR *Invalid command passed to redis.acl_check_cmd()*} {run_script {
+ return redis.acl_check_cmd('invalid-cmd','arg')
+ } 0}
+ }
+
+ test "Binary code loading failed" {
+ assert_error {ERR *attempt to call a nil value*} {run_script {
+ return loadstring(string.dump(function() return 1 end))()
+ } 0}
+ }
+
+ test "Try trick global protection 1" {
+ catch {
+ run_script {
+ setmetatable(_G, {})
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick global protection 2" {
+ catch {
+ run_script {
+ local g = getmetatable(_G)
+ g.__index = {}
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick global protection 3" {
+ catch {
+ run_script {
+ redis = function() return 1 end
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick global protection 4" {
+ catch {
+ run_script {
+ _G = {}
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick readonly table on redis table" {
+ catch {
+ run_script {
+ redis.call = function() return 1 end
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick readonly table on json table" {
+ catch {
+ run_script {
+ cjson.encode = function() return 1 end
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick readonly table on cmsgpack table" {
+ catch {
+ run_script {
+ cmsgpack.pack = function() return 1 end
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Try trick readonly table on bit table" {
+ catch {
+ run_script {
+ bit.lshift = function() return 1 end
+ } 0
+ } e
+ set _ $e
+ } {*Attempt to modify a readonly table*}
+
+ test "Test loadfile are not available" {
+ catch {
+ run_script {
+ loadfile('some file')
+ } 0
+ } e
+ set _ $e
+ } {*Script attempted to access nonexistent global variable 'loadfile'*}
+
+ test "Test dofile are not available" {
+ catch {
+ run_script {
+ dofile('some file')
+ } 0
+ } e
+ set _ $e
+ } {*Script attempted to access nonexistent global variable 'dofile'*}
+
+ test "Test print are not available" {
+ catch {
+ run_script {
+ print('some data')
+ } 0
+ } e
+ set _ $e
+ } {*Script attempted to access nonexistent global variable 'print'*}
+}
+
+# Start a new server since the last test in this stanza will kill the
+# instance.
+start_server {tags {"scripting"}} {
+ test {Timedout read-only scripts can be killed by SCRIPT KILL} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
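+ # Once a script runs longer than lua-time-limit ms, the server
+ # answers most commands with -BUSY until the script is killed.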
+ run_script_on_connection $rd {while true do end} 0
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ kill_script
+ after 200 ; # Give some time to Lua to call the hook again...
+ assert_equal [r ping] "PONG"
+ $rd close
+ }
+
+ test {Timedout read-only scripts can be killed by SCRIPT KILL even when using pcall} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+ run_script_on_connection $rd {local f = function() while 1 do redis.call('ping') end end while 1 do pcall(f) end} 0
+
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 1
+ } else {
+ fail "Can't wait for script to start running"
+ }
+ catch {r ping} e
+ assert_match {BUSY*} $e
+
+ kill_script
+
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 0
+ } else {
+ fail "Can't wait for script to be killed"
+ }
+ assert_equal [r ping] "PONG"
+
+ catch {$rd read} res
+ $rd close
+
+ assert_match {*killed by user*} $res
+ }
+
+ test {Timedout script does not cause a false dead client} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+
+ # sending (in a pipeline):
+ # 1. eval "while 1 do redis.call('ping') end" 0
+ # 2. ping
+ if {$is_eval == 1} {
+ set buf "*3\r\n\$4\r\neval\r\n\$33\r\nwhile 1 do redis.call('ping') end\r\n\$1\r\n0\r\n"
+ append buf "*1\r\n\$4\r\nping\r\n"
+ } else {
+ set buf "*4\r\n\$8\r\nfunction\r\n\$4\r\nload\r\n\$7\r\nreplace\r\n\$97\r\n#!lua name=test\nredis.register_function('test', function() while 1 do redis.call('ping') end end)\r\n"
+ append buf "*3\r\n\$5\r\nfcall\r\n\$4\r\ntest\r\n\$1\r\n0\r\n"
+ append buf "*1\r\n\$4\r\nping\r\n"
+ }
+ $rd write $buf
+ $rd flush
+
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 1
+ } else {
+ fail "Can't wait for script to start running"
+ }
+ catch {r ping} e
+ assert_match {BUSY*} $e
+
+ kill_script
+ wait_for_condition 50 100 {
+ [catch {r ping} e] == 0
+ } else {
+ fail "Can't wait for script to be killed"
+ }
+ assert_equal [r ping] "PONG"
+
+ if {$is_eval == 0} {
+ # read the function name
+ assert_match {test} [$rd read]
+ }
+
+ catch {$rd read} res
+ assert_match {*killed by user*} $res
+
+ set res [$rd read]
+ assert_match {*PONG*} $res
+
+ $rd close
+ }
+
+ test {Timedout script link is still usable after Lua returns} {
+ r config set lua-time-limit 10
+ run_script {for i=1,100000 do redis.call('ping') end return 'ok'} 0
+ r ping
+ } {PONG}
+
+ test {Timedout scripts and unblocked command} {
+ # make sure that a command allowed while the server is BUSY doesn't trigger the processing of unblocked clients
+
+ # enable AOF to also expose an assertion if the bug would happen
+ r flushall
+ r config set appendonly yes
+
+ # create clients, and set one to block waiting for key 'x'
+ set rd [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set r3 [redis_client]
+ $rd2 blpop x 0
+ wait_for_blocked_clients_count 1
+
+ # hack: allow the script to use client list command so that we can control when it aborts
+ r DEBUG set-disable-deny-scripts 1
+ r config set lua-time-limit 10
+ run_script_on_connection $rd {
+ local clients
+ redis.call('lpush',KEYS[1],'y');
+ while true do
+ clients = redis.call('client','list')
+ if string.find(clients, 'abortscript') ~= nil then break end
+ end
+ redis.call('lpush',KEYS[1],'z');
+ return clients
+ } 1 x
+
+ # wait for the script to be busy
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+
+ # now cause the script to abort, then run a command that could have
+ # processed unblocked clients (due to a bug)
+ $r3 hello 2 setname abortscript
+
+ # make sure the script completed before the pop was processed
+ assert_equal [$rd2 read] {x z}
+ assert_match {*abortscript*} [$rd read]
+
+ $rd close
+ $rd2 close
+ $r3 close
+ r DEBUG set-disable-deny-scripts 0
+ } {OK} {external:skip needs:debug}
+
+ test {Timedout scripts that modified data can't be killed by SCRIPT KILL} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+ run_script_on_connection $rd {redis.call('set',KEYS[1],'y'); while true do end} 1 x
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ catch {kill_script} e
+ assert_match {UNKILLABLE*} $e
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ } {} {external:skip}
+
+ # Note: keep this test at the end of this server stanza because it
+ # kills the server.
+ test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
+ # The server should still be unresponsive to normal commands.
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ catch {r shutdown nosave}
+ # Make sure the server was killed
+ catch {set rd [redis_deferring_client]} e
+ assert_match {*connection refused*} $e
+ } {} {external:skip}
+}
+
+ start_server {tags {"scripting repl needs:debug external:skip"}} {
+ start_server {} {
+ test "Before the replica connects we issue two EVAL commands" {
+ # One with an error, but still executing a command.
+ # SHA is: 67164fc43fa971f76fd1aaeeaf60c1c178d25876
+ catch {
+ run_script {redis.call('incr',KEYS[1]); redis.call('nonexisting')} 1 x
+ }
+ # One command is correct:
+ # SHA is: 6f5ade10a69975e903c6d07b10ea44c6382381a5
+ run_script {return redis.call('incr',KEYS[1])} 1 x
+ } {2}
+
+ test "Connect a replica to the master instance" {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ wait_for_condition 50 100 {
+ [s -1 role] eq {slave} &&
+ [string match {*master_link_status:up*} [r -1 info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+
+ if {$is_eval eq 1} {
+ test "Now use EVALSHA against the master, with both SHAs" {
+ # The server should replicate successful and unsuccessful
+ # commands as EVAL instead of EVALSHA.
+ catch {
+ r evalsha 67164fc43fa971f76fd1aaeeaf60c1c178d25876 1 x
+ }
+ r evalsha 6f5ade10a69975e903c6d07b10ea44c6382381a5 1 x
+ } {4}
+
+ test "'x' should be '4' for EVALSHA being replicated by effects" {
+ wait_for_condition 50 100 {
+ [r -1 get x] eq {4}
+ } else {
+ fail "Expected 4 in x, but value is '[r -1 get x]'"
+ }
+ }
+ } ;# is_eval
+
+ test "Replication of script multiple pushes to list with BLPOP" {
+ set rd [redis_deferring_client]
+ $rd brpop a 0
+ run_script {
+ redis.call("lpush",KEYS[1],"1");
+ redis.call("lpush",KEYS[1],"2");
+ } 1 a
+ set res [$rd read]
+ $rd close
+ wait_for_condition 50 100 {
+ [r -1 lrange a 0 -1] eq [r lrange a 0 -1]
+ } else {
+ fail "Expected list 'a' in replica and master to be the same, but they are respectively '[r -1 lrange a 0 -1]' and '[r lrange a 0 -1]'"
+ }
+ set res
+ } {a 1}
+
+ if {$is_eval eq 1} {
+ test "EVALSHA replication when first call is readonly" {
+ r del x
+ r eval {if tonumber(ARGV[1]) > 0 then redis.call('incr', KEYS[1]) end} 1 x 0
+ r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 0
+ r evalsha 6e0e2745aa546d0b50b801a20983b70710aef3ce 1 x 1
+ wait_for_condition 50 100 {
+ [r -1 get x] eq {1}
+ } else {
+ fail "Expected 1 in x, but value is '[r -1 get x]'"
+ }
+ }
+ } ;# is_eval
+
+ test "Lua scripts using SELECT are replicated correctly" {
+ run_script {
+ redis.call("set","foo1","bar1")
+ redis.call("select","10")
+ redis.call("incr","x")
+ redis.call("select","11")
+ redis.call("incr","z")
+ } 3 foo1 x z
+ run_script {
+ redis.call("set","foo1","bar1")
+ redis.call("select","10")
+ redis.call("incr","x")
+ redis.call("select","11")
+ redis.call("incr","z")
+ } 3 foo1 x z
+ wait_for_condition 50 100 {
+ [debug_digest -1] eq [debug_digest]
+ } else {
+ fail "Master-Replica desync after Lua script using SELECT."
+ }
+ } {} {singledb:skip}
+ }
+ }
+
+start_server {tags {"scripting repl external:skip"}} {
+ start_server {overrides {appendonly yes aof-use-rdb-preamble no}} {
+ test "Connect a replica to the master instance" {
+ r -1 slaveof [srv 0 host] [srv 0 port]
+ wait_for_condition 50 100 {
+ [s -1 role] eq {slave} &&
+ [string match {*master_link_status:up*} [r -1 info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+ }
+
+ # replicate_commands is the default for Redis Functions
+ test "Redis.replicate_commands() can be issued anywhere now" {
+ r eval {
+ redis.call('set','foo','bar');
+ return redis.replicate_commands();
+ } 0
+ } {1}
+
+ test "Redis.set_repl() can be issued before replicate_commands() now" {
+ catch {
+ r eval {
+ redis.set_repl(redis.REPL_ALL);
+ } 0
+ } e
+ set e
+ } {}
+
+ test "Redis.set_repl() don't accept invalid values" {
+ catch {
+ run_script {
+ redis.set_repl(12345);
+ } 0
+ } e
+ set e
+ } {*Invalid*flags*}
+
+ test "Test selective replication of certain Redis commands from Lua" {
+ r del a b c d
+ run_script {
+ redis.call('set','a','1');
+ redis.set_repl(redis.REPL_NONE);
+ redis.call('set','b','2');
+ redis.set_repl(redis.REPL_AOF);
+ redis.call('set','c','3');
+ redis.set_repl(redis.REPL_ALL);
+ redis.call('set','d','4');
+ } 4 a b c d
+
+ wait_for_condition 50 100 {
+ [r -1 mget a b c d] eq {1 {} {} 4}
+ } else {
+ fail "Only a and d should be replicated to replica"
+ }
+
+ # Master should have everything right now
+ assert {[r mget a b c d] eq {1 2 3 4}}
+
+ # After an AOF reload only a, c and d should exist
+ r debug loadaof
+
+ assert {[r mget a b c d] eq {1 {} 3 4}}
+ }
+
+ test "PRNG is seeded randomly for command replication" {
+ if {$is_eval eq 1} {
+ # with EVAL we need to call redis.replicate_commands() to get real randomization
+ set a [
+ run_script {
+ redis.replicate_commands()
+ return math.random()*100000;
+ } 0
+ ]
+ set b [
+ run_script {
+ redis.replicate_commands()
+ return math.random()*100000;
+ } 0
+ ]
+ } else {
+ set a [
+ run_script {
+ return math.random()*100000;
+ } 0
+ ]
+ set b [
+ run_script {
+ return math.random()*100000;
+ } 0
+ ]
+ }
+ assert {$a ne $b}
+ }
+
+ test "Using side effects is not a problem with command replication" {
+ run_script {
+ redis.call('set','time',redis.call('time')[1])
+ } 0
+
+ assert {[r get time] ne {}}
+
+ wait_for_condition 50 100 {
+ [r get time] eq [r -1 get time]
+ } else {
+ fail "Time key does not match between master and replica"
+ }
+ }
+ }
+}
+
+if {$is_eval eq 1} {
+start_server {tags {"scripting external:skip"}} {
+ r script debug sync
+ r eval {return 'hello'} 0
+ r eval {return 'hello'} 0
+}
+
+start_server {tags {"scripting needs:debug external:skip"}} {
+ test {Test scripting debug protocol parsing} {
+ r script debug sync
+ r eval {return 'hello'} 0
+ catch {r 'hello\0world'} e
+ assert_match {*Unknown Redis Lua debugger command*} $e
+ catch {r 'hello\0'} e
+ assert_match {*Unknown Redis Lua debugger command*} $e
+ catch {r '\0hello'} e
+ assert_match {*Unknown Redis Lua debugger command*} $e
+ catch {r '\0hello\0'} e
+ assert_match {*Unknown Redis Lua debugger command*} $e
+ }
+
+ test {Test scripting debug lua stack overflow} {
+ r script debug sync
+ r eval {return 'hello'} 0
+ set cmd "*101\r\n\$5\r\nredis\r\n"
+ append cmd [string repeat "\$4\r\ntest\r\n" 100]
+ r write $cmd
+ r flush
+ set ret [r read]
+ assert_match {*Unknown Redis command called from script*} $ret
+ # make sure the server is still ok
+ reconnect
+ assert_equal [r ping] {PONG}
+ }
+}
+} ;# is_eval
+
+start_server {tags {"scripting needs:debug"}} {
+ r debug set-disable-deny-scripts 1
+
+ for {set i 2} {$i <= 3} {incr i} {
+ for {set client_proto 2} {$client_proto <= 3} {incr client_proto} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$client_proto == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$client_proto == 2} {continue}
+ }
+ r hello $client_proto
+ set extra "RESP$i/$client_proto"
+ r readraw 1
+
+ test "test $extra big number protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'bignum')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {$37}
+ assert_equal [r read] {1234567999999999999999999999999999999}
+ } else {
+ assert_equal $ret {(1234567999999999999999999999999999999}
+ }
+ }
+
+ test "test $extra malformed big number protocol parsing" {
+ set ret [run_script "return {big_number='123\\r\\n123'}" 0]
+ if {$client_proto == 2} {
+                    # if the client is RESP2 the reply will be RESP2
+ assert_equal $ret {$8}
+ assert_equal [r read] {123 123}
+ } else {
+ assert_equal $ret {(123 123}
+ }
+ }
+
+ test "test $extra map protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'map')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {*6}
+ } else {
+ assert_equal $ret {%3}
+ }
+ for {set j 0} {$j < 6} {incr j} {
+ r read
+ }
+ }
+
+ test "test $extra set protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'set')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {*3}
+ } else {
+ assert_equal $ret {~3}
+ }
+ for {set j 0} {$j < 3} {incr j} {
+ r read
+ }
+ }
+
+ test "test $extra double protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'double')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {$5}
+ assert_equal [r read] {3.141}
+ } else {
+ assert_equal $ret {,3.141}
+ }
+ }
+
+ test "test $extra null protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'null')" 0]
+ if {$client_proto == 2} {
+                    # null is a special case in which the Lua client's RESP version does not affect the reply to the client
+ assert_equal $ret {$-1}
+ } else {
+ assert_equal $ret {_}
+ }
+ } {}
+
+ test "test $extra verbatim protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'verbatim')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {$25}
+ assert_equal [r read] {This is a verbatim}
+ assert_equal [r read] {string}
+ } else {
+ assert_equal $ret {=29}
+ assert_equal [r read] {txt:This is a verbatim}
+ assert_equal [r read] {string}
+ }
+ }
+
+ test "test $extra true protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'true')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {:1}
+ } else {
+ assert_equal $ret {#t}
+ }
+ }
+
+ test "test $extra false protocol parsing" {
+ set ret [run_script "redis.setresp($i);return redis.call('debug', 'protocol', 'false')" 0]
+ if {$client_proto == 2 || $i == 2} {
+ # if either Lua or the client is RESP2 the reply will be RESP2
+ assert_equal $ret {:0}
+ } else {
+ assert_equal $ret {#f}
+ }
+ }
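+
+            # For reference, the RESP3 type markers exercised above, with
+            # their RESP2 fallbacks in parentheses: big number `(` ($ bulk
+            # string), map `%` (* array), set `~` (* array), double `,`
+            # ($ bulk string), null `_` ($-1), verbatim string `=` ($ bulk
+            # string), and booleans `#t`/`#f` (:1/:0).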
+
+ r readraw 0
+ r hello 2
+ }
+ }
+
+    # the attribute type is not relevant to RESP2, so it's only tested with RESP3
+ test {test resp3 attribute protocol parsing} {
+        # attributes are not (yet) exposed to the script,
+        # so here we just check that the parser handles them and that they are ignored.
+ run_script "redis.setresp(3);return redis.call('debug', 'protocol', 'attrib')" 0
+ } {Some real reply following the attribute}
+
+ test "Script block the time during execution" {
+ assert_equal [run_script {
+ redis.call("SET", "key", "value", "PX", "1")
+ redis.call("DEBUG", "SLEEP", 0.01)
+ return redis.call("EXISTS", "key")
+ } 1 key] 1
+
+ assert_equal 0 [r EXISTS key]
+ }
+
+ test "Script delete the expired key" {
+ r DEBUG set-active-expire 0
+ r SET key value PX 1
+ after 2
+
+        # use DEBUG OBJECT to make sure it doesn't error (i.e. the key still exists)
+ r DEBUG OBJECT key
+
+ assert_equal [run_script {return redis.call('EXISTS', 'key')} 1 key] 0
+ assert_equal 0 [r EXISTS key]
+ r DEBUG set-active-expire 1
+ }
+
+ test "TIME command using cached time" {
+ set res [run_script {
+ local result1 = {redis.call("TIME")}
+ redis.call("DEBUG", "SLEEP", 0.01)
+ local result2 = {redis.call("TIME")}
+ return {result1, result2}
+ } 0]
+ assert_equal [lindex $res 0] [lindex $res 1]
+ }
+
+ test "Script block the time in some expiration related commands" {
+ # The test uses different commands to set the "same" expiration time for different keys,
+ # and interspersed with "DEBUG SLEEP", to verify that time is frozen in script.
+ # The commands involved are [P]TTL / SET EX[PX] / [P]EXPIRE / GETEX / [P]SETEX / [P]EXPIRETIME
+ set res [run_script {
+ redis.call("SET", "key1{t}", "value", "EX", 1)
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("SET", "key2{t}", "value", "PX", 1000)
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("SET", "key3{t}", "value")
+ redis.call("EXPIRE", "key3{t}", 1)
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("SET", "key4{t}", "value")
+ redis.call("PEXPIRE", "key4{t}", 1000)
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("SETEX", "key5{t}", 1, "value")
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("PSETEX", "key6{t}", 1000, "value")
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("SET", "key7{t}", "value")
+ redis.call("GETEX", "key7{t}", "EX", 1)
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ redis.call("SET", "key8{t}", "value")
+ redis.call("GETEX", "key8{t}", "PX", 1000)
+ redis.call("DEBUG", "SLEEP", 0.01)
+
+ local ttl_results = {redis.call("TTL", "key1{t}"),
+ redis.call("TTL", "key2{t}"),
+ redis.call("TTL", "key3{t}"),
+ redis.call("TTL", "key4{t}"),
+ redis.call("TTL", "key5{t}"),
+ redis.call("TTL", "key6{t}"),
+ redis.call("TTL", "key7{t}"),
+ redis.call("TTL", "key8{t}")}
+
+ local pttl_results = {redis.call("PTTL", "key1{t}"),
+ redis.call("PTTL", "key2{t}"),
+ redis.call("PTTL", "key3{t}"),
+ redis.call("PTTL", "key4{t}"),
+ redis.call("PTTL", "key5{t}"),
+ redis.call("PTTL", "key6{t}"),
+ redis.call("PTTL", "key7{t}"),
+ redis.call("PTTL", "key8{t}")}
+
+ local expiretime_results = {redis.call("EXPIRETIME", "key1{t}"),
+ redis.call("EXPIRETIME", "key2{t}"),
+ redis.call("EXPIRETIME", "key3{t}"),
+ redis.call("EXPIRETIME", "key4{t}"),
+ redis.call("EXPIRETIME", "key5{t}"),
+ redis.call("EXPIRETIME", "key6{t}"),
+ redis.call("EXPIRETIME", "key7{t}"),
+ redis.call("EXPIRETIME", "key8{t}")}
+
+ local pexpiretime_results = {redis.call("PEXPIRETIME", "key1{t}"),
+ redis.call("PEXPIRETIME", "key2{t}"),
+ redis.call("PEXPIRETIME", "key3{t}"),
+ redis.call("PEXPIRETIME", "key4{t}"),
+ redis.call("PEXPIRETIME", "key5{t}"),
+ redis.call("PEXPIRETIME", "key6{t}"),
+ redis.call("PEXPIRETIME", "key7{t}"),
+ redis.call("PEXPIRETIME", "key8{t}")}
+
+ return {ttl_results, pttl_results, expiretime_results, pexpiretime_results}
+ } 8 key1{t} key2{t} key3{t} key4{t} key5{t} key6{t} key7{t} key8{t}]
+
+ # The elements in each list are equal.
+ assert_equal 1 [llength [lsort -unique [lindex $res 0]]]
+ assert_equal 1 [llength [lsort -unique [lindex $res 1]]]
+ assert_equal 1 [llength [lsort -unique [lindex $res 2]]]
+ assert_equal 1 [llength [lsort -unique [lindex $res 3]]]
+
+ # Then we check that the expiration time is set successfully.
+ assert_morethan [lindex $res 0] 0
+ assert_morethan [lindex $res 1] 0
+ assert_morethan [lindex $res 2] 0
+ assert_morethan [lindex $res 3] 0
+ }
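+
+    # A minimal sketch of the frozen-clock behavior verified above
+    # (hypothetical key, same pattern as the tests):
+    #   run_script {
+    #       redis.call("SET", "k{t}", "v", "PX", 500)
+    #       redis.call("DEBUG", "SLEEP", 0.2)
+    #       return redis.call("PTTL", "k{t}")  -- still 500: time is frozen
+    #   } 1 k{t}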
+
+ test "RESTORE expired keys with expiration time" {
+ set res [run_script {
+ redis.call("SET", "key1{t}", "value")
+ local encoded = redis.call("DUMP", "key1{t}")
+
+ redis.call("RESTORE", "key2{t}", 1, encoded, "REPLACE")
+ redis.call("DEBUG", "SLEEP", 0.01)
+ redis.call("RESTORE", "key3{t}", 1, encoded, "REPLACE")
+
+ return {redis.call("PEXPIRETIME", "key2{t}"), redis.call("PEXPIRETIME", "key3{t}")}
+ } 3 key1{t} key2{t} key3{t}]
+
+ # Can get the expiration time and they are all equal.
+ assert_morethan [lindex $res 0] 0
+ assert_equal [lindex $res 0] [lindex $res 1]
+ }
+
+ r debug set-disable-deny-scripts 0
+}
+} ;# foreach is_eval
+
+
+# Scripting "shebang" notation tests
+start_server {tags {"scripting"}} {
+ test "Shebang support for lua engine" {
+ catch {
+ r eval {#!not-lua
+ return 1
+ } 0
+ } e
+ assert_match {*Unexpected engine in script shebang*} $e
+
+ assert_equal [r eval {#!lua
+ return 1
+ } 0] 1
+ }
+
+ test "Unknown shebang option" {
+ catch {
+ r eval {#!lua badger=data
+ return 1
+ } 0
+ } e
+ assert_match {*Unknown lua shebang option*} $e
+ }
+
+ test "Unknown shebang flag" {
+ catch {
+ r eval {#!lua flags=allow-oom,what?
+ return 1
+ } 0
+ } e
+ assert_match {*Unexpected flag in script shebang*} $e
+ }
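+
+    # For reference, the script flags documented for the shebang line are
+    # no-writes, allow-oom, allow-stale, no-cluster and allow-cross-slot-keys,
+    # combined as e.g. `#!lua flags=no-writes,allow-stale`. The tests below
+    # exercise a subset of them.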
+
+ test "allow-oom shebang flag" {
+ r set x 123
+
+ r config set maxmemory 1
+
+ # Fail to execute deny-oom command in OOM condition (backwards compatibility mode without flags)
+ assert_error {OOM command not allowed when used memory > 'maxmemory'*} {
+ r eval {
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ }
+ # Can execute non deny-oom commands in OOM condition (backwards compatibility mode without flags)
+ assert_equal [
+ r eval {
+ return redis.call('get','x')
+ } 1 x
+ ] {123}
+
+ # Fail to execute regardless of script content when we use default flags in OOM condition
+ assert_error {OOM *} {
+ r eval {#!lua flags=
+ return 1
+ } 0
+ }
+
+ # Script with allow-oom can write despite being in OOM state
+ assert_equal [
+ r eval {#!lua flags=allow-oom
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ ] 1
+
+        # read-only scripts imply allow-oom
+ assert_equal [
+ r eval {#!lua flags=no-writes
+ redis.call('get','x')
+ return 1
+ } 0
+ ] 1
+ assert_equal [
+ r eval_ro {#!lua flags=no-writes
+ redis.call('get','x')
+ return 1
+ } 1 x
+ ] 1
+
+ # Script with no shebang can read in OOM state
+ assert_equal [
+ r eval {
+ redis.call('get','x')
+ return 1
+ } 1 x
+ ] 1
+
+ # Script with no shebang can read in OOM state (eval_ro variant)
+ assert_equal [
+ r eval_ro {
+ redis.call('get','x')
+ return 1
+ } 1 x
+ ] 1
+
+ r config set maxmemory 0
+ } {OK} {needs:config-maxmemory}
+
+ test "no-writes shebang flag" {
+ assert_error {ERR Write commands are not allowed from read-only scripts*} {
+ r eval {#!lua flags=no-writes
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+ }
+ }
+
+ start_server {tags {"external:skip"}} {
+ r -1 set x "some value"
+ test "no-writes shebang flag on replica" {
+ r replicaof [srv -1 host] [srv -1 port]
+ wait_for_condition 50 100 {
+ [s role] eq {slave} &&
+ [string match {*master_link_status:up*} [r info replication]]
+ } else {
+ fail "Can't turn the instance into a replica"
+ }
+
+ assert_equal [
+ r eval {#!lua flags=no-writes
+ return redis.call('get','x')
+ } 1 x
+ ] "some value"
+
+ assert_error {READONLY You can't write against a read only replica.} {
+ r eval {#!lua
+ return redis.call('get','x')
+ } 1 x
+ }
+
+ # test no-write inside multi-exec
+ r multi
+ r eval {#!lua flags=no-writes
+ redis.call('get','x')
+ return 1
+ } 1 x
+ assert_equal [r exec] 1
+
+ # test no shebang without write inside multi-exec
+ r multi
+ r eval {
+ redis.call('get','x')
+ return 1
+ } 1 x
+ assert_equal [r exec] 1
+
+ # temporarily set the server to master, so it doesn't block the queuing
+ # and we can test the evaluation of the flags on exec
+ r replicaof no one
+ set rr [redis_client]
+ set rr2 [redis_client]
+ $rr multi
+ $rr2 multi
+
+ # test write inside multi-exec
+ # we don't need to do any actual write
+ $rr eval {#!lua
+ return 1
+ } 0
+
+ # test no shebang with write inside multi-exec
+ $rr2 eval {
+ redis.call('set','x',1)
+ return 1
+ } 1 x
+
+ r replicaof [srv -1 host] [srv -1 port]
+ assert_error {EXECABORT Transaction discarded because of: READONLY *} {$rr exec}
+ assert_error {READONLY You can't write against a read only replica. script: *} {$rr2 exec}
+ $rr close
+ $rr2 close
+ }
+ }
+
+ test "not enough good replicas" {
+ r set x "some value"
+ r config set min-replicas-to-write 1
+
+ assert_equal [
+ r eval {#!lua flags=no-writes
+ return redis.call('get','x')
+ } 1 x
+ ] "some value"
+
+ assert_equal [
+ r eval {
+ return redis.call('get','x')
+ } 1 x
+ ] "some value"
+
+ assert_error {NOREPLICAS *} {
+ r eval {#!lua
+ return redis.call('get','x')
+ } 1 x
+ }
+
+ assert_error {NOREPLICAS *} {
+ r eval {
+ return redis.call('set','x', 1)
+ } 1 x
+ }
+
+ r config set min-replicas-to-write 0
+ }
+
+ test "not enough good replicas state change during long script" {
+ r set x "pre-script value"
+ r config set min-replicas-to-write 1
+ r config set lua-time-limit 10
+ start_server {tags {"external:skip"}} {
+ # add a replica and wait for the master to recognize it's online
+ r slaveof [srv -1 host] [srv -1 port]
+ wait_replica_online [srv -1 client]
+
+ # run a slow script that does one write, then waits for INFO to indicate
+ # that the replica dropped, and then runs another write
+ set rd [redis_deferring_client -1]
+ $rd eval {
+ redis.call('set','x',"script value")
+ while true do
+ local info = redis.call('info','replication')
+ if (string.match(info, "connected_slaves:0")) then
+ redis.call('set','x',info)
+ break
+ end
+ end
+ return 1
+ } 1 x
+
+ # wait for the script to time out and yield
+ wait_for_condition 100 100 {
+ [catch {r -1 ping} e] == 1
+ } else {
+ fail "Can't wait for script to start running"
+ }
+ catch {r -1 ping} e
+ assert_match {BUSY*} $e
+
+ # cause the replica to disconnect (triggering the busy script to exit)
+ r slaveof no one
+
+ # make sure the script was able to write after the replica dropped
+ assert_equal [$rd read] 1
+ assert_match {*connected_slaves:0*} [r -1 get x]
+
+ $rd close
+ }
+ r config set min-replicas-to-write 0
+ r config set lua-time-limit 5000
+ } {OK} {external:skip needs:repl}
+
+ test "allow-stale shebang flag" {
+ r config set replica-serve-stale-data no
+ r replicaof 127.0.0.1 1
+
+ assert_error {MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'.} {
+ r eval {
+ return redis.call('get','x')
+ } 1 x
+ }
+
+ assert_error {MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'.} {
+ r eval {#!lua flags=no-writes
+ return 1
+ } 0
+ }
+
+ assert_equal [
+ r eval {#!lua flags=allow-stale,no-writes
+ return 1
+ } 0
+ ] 1
+
+
+ assert_error {*Can not execute the command on a stale replica*} {
+ r eval {#!lua flags=allow-stale,no-writes
+ return redis.call('get','x')
+ } 1 x
+ }
+
+ assert_match {foobar} [
+ r eval {#!lua flags=allow-stale,no-writes
+ return redis.call('echo','foobar')
+ } 0
+ ]
+
+ # Test again with EVALSHA
+ set sha [
+ r script load {#!lua flags=allow-stale,no-writes
+ return redis.call('echo','foobar')
+ }
+ ]
+ assert_match {foobar} [r evalsha $sha 0]
+
+ r replicaof no one
+ r config set replica-serve-stale-data yes
+ set _ {}
+ } {} {external:skip}
+
+ test "reject script do not cause a Lua stack leak" {
+ r config set maxmemory 1
+ for {set i 0} {$i < 50} {incr i} {
+ assert_error {OOM *} {r eval {#!lua
+ return 1
+ } 0}
+ }
+ r config set maxmemory 0
+ assert_equal [r eval {#!lua
+ return 1
+ } 0] 1
+ }
+}
+
+# Additional eval only tests
+start_server {tags {"scripting"}} {
+ test "Consistent eval error reporting" {
+ r config resetstat
+ r config set maxmemory 1
+ # Script aborted due to Redis state (OOM) should report script execution error with detailed internal error
+ assert_error {OOM command not allowed when used memory > 'maxmemory'*} {
+ r eval {return redis.call('set','x','y')} 1 x
+ }
+ assert_equal [errorrstat OOM r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r]
+
+ # redis.pcall() failure due to Redis state (OOM) returns lua error table with Redis error message without '-' prefix
+ r config resetstat
+ assert_equal [
+ r eval {
+ local t = redis.pcall('set','x','y')
+ if t['err'] == "OOM command not allowed when used memory > 'maxmemory'." then
+ return 1
+ else
+ return 0
+ end
+ } 1 x
+ ] 1
+ # error stats were not incremented
+ assert_equal [errorrstat ERR r] {}
+ assert_equal [errorrstat OOM r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=0*} [cmdrstat eval r]
+
+ # Returning an error object from lua is handled as a valid RESP error result.
+ r config resetstat
+ assert_error {OOM command not allowed when used memory > 'maxmemory'.} {
+ r eval { return redis.pcall('set','x','y') } 1 x
+ }
+ assert_equal [errorrstat ERR r] {}
+ assert_equal [errorrstat OOM r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r]
+
+ r config set maxmemory 0
+ r config resetstat
+ # Script aborted due to error result of Redis command
+ assert_error {ERR DB index is out of range*} {
+ r eval {return redis.call('select',99)} 0
+ }
+ assert_equal [errorrstat ERR r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat select r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r]
+
+ # redis.pcall() failure due to error in Redis command returns lua error table with redis error message without '-' prefix
+ r config resetstat
+ assert_equal [
+ r eval {
+ local t = redis.pcall('select',99)
+ if t['err'] == "ERR DB index is out of range" then
+ return 1
+ else
+ return 0
+ end
+ } 0
+ ] 1
+        assert_equal [errorrstat ERR r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat select r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=0*} [cmdrstat eval r]
+
+ # Script aborted due to scripting specific error state (write cmd with eval_ro) should report script execution error with detailed internal error
+ r config resetstat
+ assert_error {ERR Write commands are not allowed from read-only scripts*} {
+ r eval_ro {return redis.call('set','x','y')} 1 x
+ }
+ assert_equal [errorrstat ERR r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval_ro r]
+
+ # redis.pcall() failure due to scripting specific error state (write cmd with eval_ro) returns lua error table with Redis error message without '-' prefix
+ r config resetstat
+ assert_equal [
+ r eval_ro {
+ local t = redis.pcall('set','x','y')
+ if t['err'] == "ERR Write commands are not allowed from read-only scripts." then
+ return 1
+ else
+ return 0
+ end
+ } 1 x
+ ] 1
+ assert_equal [errorrstat ERR r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=0*} [cmdrstat eval_ro r]
+
+ r config resetstat
+        # make sure geoadd will fail
+ r set Sicily 1
+ assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {
+ r eval {return redis.call('GEOADD', 'Sicily', '13.361389', '38.115556', 'Palermo', '15.087269', '37.502669', 'Catania')} 1 x
+ }
+ assert_equal [errorrstat WRONGTYPE r] {count=1}
+ assert_equal [s total_error_replies] {1}
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat geoadd r]
+ assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r]
+ } {} {cluster:skip}
+
+ test "LUA redis.error_reply API" {
+ r config resetstat
+ assert_error {MY_ERR_CODE custom msg} {
+ r eval {return redis.error_reply("MY_ERR_CODE custom msg")} 0
+ }
+ assert_equal [errorrstat MY_ERR_CODE r] {count=1}
+ }
+
+ test "LUA redis.error_reply API with empty string" {
+ r config resetstat
+ assert_error {ERR} {
+ r eval {return redis.error_reply("")} 0
+ }
+ assert_equal [errorrstat ERR r] {count=1}
+ }
+
+ test "LUA redis.status_reply API" {
+ r config resetstat
+ r readraw 1
+ assert_equal [
+ r eval {return redis.status_reply("MY_OK_CODE custom msg")} 0
+ ] {+MY_OK_CODE custom msg}
+ r readraw 0
+ assert_equal [errorrstat MY_ERR_CODE r] {} ;# error stats were not incremented
+ }
+
+ test "LUA test pcall" {
+ assert_equal [
+ r eval {local status, res = pcall(function() return 1 end); return 'status: ' .. tostring(status) .. ' result: ' .. res} 0
+ ] {status: true result: 1}
+ }
+
+ test "LUA test pcall with error" {
+ assert_match {status: false result:*Script attempted to access nonexistent global variable 'foo'} [
+ r eval {local status, res = pcall(function() return foo end); return 'status: ' .. tostring(status) .. ' result: ' .. res} 0
+ ]
+ }
+
+ test "LUA test pcall with non string/integer arg" {
+ assert_error "ERR Lua redis lib command arguments must be strings or integers*" {
+ r eval {
+ local x={}
+ return redis.call("ping", x)
+ } 0
+ }
+ # run another command, to make sure the cached argv array survived
+ assert_equal [
+ r eval {
+ return redis.call("ping", "asdf")
+ } 0
+ ] {asdf}
+ }
+
+ test "LUA test trim string as expected" {
+        # this test may fail if we use a memory allocator other than jemalloc; libc, for example, may keep the old size on realloc.
+ if {[string match {*jemalloc*} [s mem_allocator]]} {
+            # test that when using the Lua argv cache mechanism, the string is trimmed if there is free space in the argv array.
+ r set foo [string repeat "a" 45]
+ set expected_memory [r memory usage foo]
+
+            # For the requested 63 bytes, jemalloc will allocate 80 bytes.
+            # We can't test larger sizes because LUA_CMD_OBJCACHE_MAX_LEN is 64.
+            # This cached value will be recycled and used for the next argument.
+            # We use SETNX to avoid storing the string, which would prevent us from reusing it in the next command.
+ r eval {
+ return redis.call("SETNX", "foo", string.rep("a", 63))
+ } 0
+
+            # For the requested 45 bytes, jemalloc will allocate 56 bytes.
+            # We can't test smaller sizes because below OBJ_ENCODING_EMBSTR_SIZE_LIMIT (44) no trimming is done.
+ r eval {
+ return redis.call("SET", "foo", string.rep("a", 45))
+ } 0
+
+ # Assert the string has been trimmed and the 80 bytes from the previous alloc were not kept.
+            assert {[r memory usage foo] <= $expected_memory}
+ }
+ }
+}
diff --git a/tests/unit/shutdown.tcl b/tests/unit/shutdown.tcl
new file mode 100644
index 0000000..7504851
--- /dev/null
+++ b/tests/unit/shutdown.tcl
@@ -0,0 +1,133 @@
+start_server {tags {"shutdown external:skip"}} {
+    test {Temp rdb will be deleted if we use bg_unlink on shutdown} {
+ for {set i 0} {$i < 20} {incr i} {
+ r set $i $i
+ }
+ r config set rdb-key-save-delay 10000000
+
+ # Child is dumping rdb
+ r bgsave
+ wait_for_condition 1000 10 {
+ [s rdb_bgsave_in_progress] eq 1
+ } else {
+ fail "bgsave did not start in time"
+ }
+ after 100 ;# give the child a bit of time for the file to be created
+
+ set dir [lindex [r config get dir] 1]
+ set child_pid [get_child_pid 0]
+ set temp_rdb [file join [lindex [r config get dir] 1] temp-${child_pid}.rdb]
+        # The temp rdb file must exist
+ assert {[file exists $temp_rdb]}
+
+ catch {r shutdown nosave}
+ # Make sure the server was killed
+ catch {set rd [redis_deferring_client]} e
+ assert_match {*connection refused*} $e
+
+ # Temp rdb file must be deleted
+ assert {![file exists $temp_rdb]}
+ }
+}
+
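+# Note: the temp rdb file is named temp-<pid>.rdb, where <pid> is the
+# child's pid for a background save (as in the test above) and the server's
+# own pid for the save performed in the shutdown path (as in the tests below).
+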
+start_server {tags {"shutdown external:skip"} overrides {save {900 1}}} {
+ test {SHUTDOWN ABORT can cancel SIGTERM} {
+ r debug pause-cron 1
+ set pid [s process_id]
+ exec kill -SIGTERM $pid
+ after 10; # Give signal handler some time to run
+ r shutdown abort
+ verify_log_message 0 "*Shutdown manually aborted*" 0
+ r debug pause-cron 0
+ r ping
+ } {PONG}
+
+    test {Temp rdb will be deleted in the signal handler} {
+ for {set i 0} {$i < 20} {incr i} {
+ r set $i $i
+ }
+ # It will cost 2s (20 * 100ms) to dump rdb
+ r config set rdb-key-save-delay 100000
+
+ set pid [s process_id]
+ set temp_rdb [file join [lindex [r config get dir] 1] temp-${pid}.rdb]
+
+ # trigger a shutdown which will save an rdb
+ exec kill -SIGINT $pid
+ # Wait for creation of temp rdb
+ wait_for_condition 50 10 {
+ [file exists $temp_rdb]
+ } else {
+ fail "Can't trigger rdb save on shutdown"
+ }
+
+ # Insist on immediate shutdown, temp rdb file must be deleted
+ exec kill -SIGINT $pid
+ # wait for the rdb file to be deleted
+ wait_for_condition 50 10 {
+ ![file exists $temp_rdb]
+ } else {
+ fail "Can't trigger rdb save on shutdown"
+ }
+ }
+}
+
+start_server {tags {"shutdown external:skip"} overrides {save {900 1}}} {
+ set pid [s process_id]
+ set dump_rdb [file join [lindex [r config get dir] 1] dump.rdb]
+
+    test {RDB save will fail on shutdown} {
+ for {set i 0} {$i < 20} {incr i} {
+ r set $i $i
+ }
+
+        # Create a folder called 'dump.rdb' to trigger a temp-rdb rename failure,
+        # which will eventually cause the rdb save to fail.
+ if {[file exists $dump_rdb]} {
+ exec rm -f $dump_rdb
+ }
+ exec mkdir -p $dump_rdb
+ }
+ test {SHUTDOWN will abort if rdb save failed on signal} {
+ # trigger a shutdown which will save an rdb
+ exec kill -SIGINT $pid
+ wait_for_log_messages 0 {"*Error trying to save the DB, can't exit*"} 0 100 10
+ }
+ test {SHUTDOWN will abort if rdb save failed on shutdown command} {
+        catch {r shutdown} err
+ assert_match {*Errors trying to SHUTDOWN*} $err
+ # make sure the server is still alive
+ assert_equal [r ping] {PONG}
+ }
+ test {SHUTDOWN can proceed if shutdown command was with nosave} {
+        catch {r shutdown nosave}
+ wait_for_log_messages 0 {"*ready to exit, bye bye*"} 0 100 10
+ }
+ test {Clean up rdb same named folder} {
+ exec rm -r $dump_rdb
+ }
+}
+
+
+start_server {tags {"shutdown external:skip"} overrides {appendonly no}} {
+ test {SHUTDOWN SIGTERM will abort if there's an initial AOFRW - default} {
+ r config set shutdown-on-sigterm default
+ r config set rdb-key-save-delay 10000000
+ for {set i 0} {$i < 10} {incr i} {
+ r set $i $i
+ }
+
+ r config set appendonly yes
+ wait_for_condition 1000 10 {
+ [s aof_rewrite_in_progress] eq 1
+ } else {
+ fail "aof rewrite did not start in time"
+ }
+
+ set pid [s process_id]
+ exec kill -SIGTERM $pid
+ wait_for_log_messages 0 {"*Writing initial AOF, can't exit*"} 0 1000 10
+
+ r config set shutdown-on-sigterm force
+ }
+}
diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl
new file mode 100644
index 0000000..3c547b9
--- /dev/null
+++ b/tests/unit/slowlog.tcl
@@ -0,0 +1,228 @@
+start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
+ test {SLOWLOG - check that it starts with an empty log} {
+ if {$::external} {
+ r slowlog reset
+ }
+ r slowlog len
+ } {0}
+
+ test {SLOWLOG - only logs commands taking more time than specified} {
+ r config set slowlog-log-slower-than 100000
+ r ping
+ assert_equal [r slowlog len] 0
+ r debug sleep 0.2
+ assert_equal [r slowlog len] 1
+ } {} {needs:debug}
+
+ test {SLOWLOG - max entries is correctly handled} {
+ r config set slowlog-log-slower-than 0
+ r config set slowlog-max-len 10
+ for {set i 0} {$i < 100} {incr i} {
+ r ping
+ }
+ r slowlog len
+ } {10}
+
+ test {SLOWLOG - GET optional argument to limit output len works} {
+
+ assert_equal 5 [llength [r slowlog get 5]]
+ assert_equal 10 [llength [r slowlog get -1]]
+ assert_equal 10 [llength [r slowlog get 20]]
+ }
+
+ test {SLOWLOG - RESET subcommand works} {
+ r config set slowlog-log-slower-than 100000
+ r slowlog reset
+ r slowlog len
+ } {0}
+
+ test {SLOWLOG - logged entry sanity check} {
+ r client setname foobar
+ r debug sleep 0.2
+ set e [lindex [r slowlog get] 0]
+ assert_equal [llength $e] 6
+ if {!$::external} {
+ assert_equal [lindex $e 0] 107
+ }
+ assert_equal [expr {[lindex $e 2] > 100000}] 1
+ assert_equal [lindex $e 3] {debug sleep 0.2}
+ assert_equal {foobar} [lindex $e 5]
+ } {} {needs:debug}
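+
+    # For reference, each slowlog entry read above is a 6-element array:
+    # a unique id, the unix timestamp, the execution time in microseconds,
+    # the argument vector, the client address and the client name -- which
+    # is what the [lindex $e ...] assertions are picking apart.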
+
+    test {SLOWLOG - Certain commands that contain sensitive information are omitted or redacted} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ catch {r acl setuser "slowlog test user" +get +set} _
+ r config set masterauth ""
+ r acl setuser slowlog-test-user +get +set
+ r config set slowlog-log-slower-than 0
+ r config set slowlog-log-slower-than -1
+ set slowlog_resp [r slowlog get]
+
+ # Make sure normal configs work, but the two sensitive
+ # commands are omitted or redacted
+ assert_equal 5 [llength $slowlog_resp]
+ assert_equal {slowlog reset} [lindex [lindex $slowlog_resp 4] 3]
+ assert_equal {acl setuser (redacted) (redacted) (redacted)} [lindex [lindex $slowlog_resp 3] 3]
+ assert_equal {config set masterauth (redacted)} [lindex [lindex $slowlog_resp 2] 3]
+ assert_equal {acl setuser (redacted) (redacted) (redacted)} [lindex [lindex $slowlog_resp 1] 3]
+ assert_equal {config set slowlog-log-slower-than 0} [lindex [lindex $slowlog_resp 0] 3]
+ } {} {needs:repl}
+
+ test {SLOWLOG - Some commands can redact sensitive fields} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ r migrate [srv 0 host] [srv 0 port] key 9 5000
+ r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH user
+ r migrate [srv 0 host] [srv 0 port] key 9 5000 AUTH2 user password
+ r config set slowlog-log-slower-than -1
+ set slowlog_resp [r slowlog get]
+
+        # Make sure all 3 commands were logged, but the sensitive fields are redacted
+ assert_equal 4 [llength $slowlog_resp]
+ assert_match {* key 9 5000} [lindex [lindex $slowlog_resp 2] 3]
+ assert_match {* key 9 5000 AUTH (redacted)} [lindex [lindex $slowlog_resp 1] 3]
+ assert_match {* key 9 5000 AUTH2 (redacted) (redacted)} [lindex [lindex $slowlog_resp 0] 3]
+ } {} {needs:repl}
+
+ test {SLOWLOG - Rewritten commands are logged as their original command} {
+ r config set slowlog-log-slower-than 0
+
+ # Test rewriting client arguments
+ r sadd set a b c d e
+ r slowlog reset
+
+        # SPOP is rewritten as DEL when all elements are removed
+ r spop set 10
+ assert_equal {spop set 10} [lindex [lindex [r slowlog get] 0] 3]
+
+ # Test replacing client arguments
+ r slowlog reset
+
+ # GEOADD is replicated as ZADD
+ r geoadd cool-cities -122.33207 47.60621 Seattle
+ assert_equal {geoadd cool-cities -122.33207 47.60621 Seattle} [lindex [lindex [r slowlog get] 0] 3]
+
+ # Test replacing a single command argument
+ r set A 5
+ r slowlog reset
+
+ # GETSET is replicated as SET
+ r getset a 5
+ assert_equal {getset a 5} [lindex [lindex [r slowlog get] 0] 3]
+
+ # INCRBYFLOAT calls rewrite multiple times, so it's a special case
+ r set A 0
+ r slowlog reset
+
+ # INCRBYFLOAT is replicated as SET
+ r INCRBYFLOAT A 1.0
+ assert_equal {INCRBYFLOAT A 1.0} [lindex [lindex [r slowlog get] 0] 3]
+
+ # blocked BLPOP is replicated as LPOP
+ set rd [redis_deferring_client]
+ $rd blpop l 0
+ wait_for_blocked_clients_count 1 50 100
+ r multi
+ r lpush l foo
+ r slowlog reset
+ r exec
+ $rd read
+ $rd close
+ assert_equal {blpop l 0} [lindex [lindex [r slowlog get] 0] 3]
+ }
+
+ test {SLOWLOG - commands with too many arguments are trimmed} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
+ set e [lindex [r slowlog get] end-1]
+ lindex $e 3
+ } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}}
+
+ test {SLOWLOG - too long arguments are trimmed} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ set arg [string repeat A 129]
+ r sadd set foo $arg
+ set e [lindex [r slowlog get] end-1]
+ lindex $e 3
+ } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}}
+
+ test {SLOWLOG - EXEC is not logged, just executed commands} {
+ r config set slowlog-log-slower-than 100000
+ r slowlog reset
+ assert_equal [r slowlog len] 0
+ r multi
+ r debug sleep 0.2
+ r exec
+ assert_equal [r slowlog len] 1
+ set e [lindex [r slowlog get] 0]
+ assert_equal [lindex $e 3] {debug sleep 0.2}
+ } {} {needs:debug}
+
+ test {SLOWLOG - can clean older entries} {
+ r client setname lastentry_client
+ r config set slowlog-max-len 1
+ r debug sleep 0.2
+ assert {[llength [r slowlog get]] == 1}
+ set e [lindex [r slowlog get] 0]
+ assert_equal {lastentry_client} [lindex $e 5]
+ } {} {needs:debug}
+
+ test {SLOWLOG - can be disabled} {
+ r config set slowlog-max-len 1
+ r config set slowlog-log-slower-than 1
+ r slowlog reset
+ r debug sleep 0.2
+ assert_equal [r slowlog len] 1
+ r config set slowlog-log-slower-than -1
+ r slowlog reset
+ r debug sleep 0.2
+ assert_equal [r slowlog len] 0
+ } {} {needs:debug}
+
+ test {SLOWLOG - count must be >= -1} {
+ assert_error "ERR count should be greater than or equal to -1" {r slowlog get -2}
+ assert_error "ERR count should be greater than or equal to -1" {r slowlog get -222}
+ }
+
+ test {SLOWLOG - get all slow logs} {
+ r config set slowlog-log-slower-than 0
+ r config set slowlog-max-len 3
+ r slowlog reset
+
+ r set key test
+ r sadd set a b c
+ r incr num
+ r lpush list a
+
+ assert_equal [r slowlog len] 3
+ assert_equal 0 [llength [r slowlog get 0]]
+ assert_equal 1 [llength [r slowlog get 1]]
+ assert_equal 3 [llength [r slowlog get -1]]
+ assert_equal 3 [llength [r slowlog get 3]]
+ }
+
+ test {SLOWLOG - blocking command is reported only after unblocked} {
+ # Cleanup first
+ r del mylist
+ # create a test client
+ set rd [redis_deferring_client]
+
+ # config the slowlog and reset
+ r config set slowlog-log-slower-than 0
+ r config set slowlog-max-len 110
+ r slowlog reset
+
+ $rd BLPOP mylist 0
+ wait_for_blocked_clients_count 1 50 20
+ assert_equal 0 [llength [regexp -all -inline (?=BLPOP) [r slowlog get]]]
+
+ r LPUSH mylist 1
+ wait_for_blocked_clients_count 0 50 20
+ assert_equal 1 [llength [regexp -all -inline (?=BLPOP) [r slowlog get]]]
+
+ $rd close
+ }
+}
diff --git a/tests/unit/sort.tcl b/tests/unit/sort.tcl
new file mode 100644
index 0000000..109f661
--- /dev/null
+++ b/tests/unit/sort.tcl
@@ -0,0 +1,359 @@
+start_server {
+ tags {"sort"}
+ overrides {
+ "list-max-ziplist-size" 16
+ "set-max-intset-entries" 32
+ }
+} {
+ proc create_random_dataset {num cmd} {
+ set tosort {}
+ set result {}
+ array set seenrand {}
+ r del tosort
+ for {set i 0} {$i < $num} {incr i} {
+ # Make sure all the weights are different because
+ # Redis does not use a stable sort but Tcl does.
+ while 1 {
+ randpath {
+ set rint [expr int(rand()*1000000)]
+ } {
+ set rint [expr rand()]
+ }
+ if {![info exists seenrand($rint)]} break
+ }
+ set seenrand($rint) x
+ r $cmd tosort $i
+ r set weight_$i $rint
+ r hset wobj_$i weight $rint
+ lappend tosort [list $i $rint]
+ }
+ set sorted [lsort -index 1 -real $tosort]
+ for {set i 0} {$i < $num} {incr i} {
+ lappend result [lindex $sorted $i 0]
+ }
+ set _ $result
+ }
+
+ proc check_sort_store_encoding {key} {
+ set listpack_max_size [lindex [r config get list-max-ziplist-size] 1]
+
+        # When the length of the list is less than or equal to the limit,
+        # it will be encoded as a listpack.
+ if {[r llen $key] <= $listpack_max_size} {
+ assert_encoding listpack $key
+ } else {
+ assert_encoding quicklist $key
+ }
+ }
+
+ foreach {num cmd enc title} {
+ 16 lpush listpack "Listpack"
+ 1000 lpush quicklist "Quicklist"
+ 10000 lpush quicklist "Big Quicklist"
+ 16 sadd intset "Intset"
+ 1000 sadd hashtable "Hash table"
+ 10000 sadd hashtable "Big Hash table"
+ } {
+ set result [create_random_dataset $num $cmd]
+ assert_encoding $enc tosort
+
+ test "$title: SORT BY key" {
+ assert_equal $result [r sort tosort BY weight_*]
+ } {} {cluster:skip}
+
+ test "$title: SORT BY key with limit" {
+ assert_equal [lrange $result 5 9] [r sort tosort BY weight_* LIMIT 5 5]
+ } {} {cluster:skip}
+
+ test "$title: SORT BY hash field" {
+ assert_equal $result [r sort tosort BY wobj_*->weight]
+ } {} {cluster:skip}
+ }
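+
+    # SORT ... BY substitutes each element into the pattern at the `*` and
+    # orders by the resulting key's value (or hash field, with a `->field`
+    # suffix). A minimal sketch of the idea, assuming a fresh `mylist`:
+    #   r rpush mylist a b
+    #   r mset weight_a 2 weight_b 1
+    #   r sort mylist BY weight_*   ;# => b a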
+
+ set result [create_random_dataset 16 lpush]
+ test "SORT GET #" {
+ assert_equal [lsort -integer $result] [r sort tosort GET #]
+ } {} {cluster:skip}
+
+foreach command {SORT SORT_RO} {
+ test "$command GET <const>" {
+ r del foo
+ set res [r $command tosort GET foo]
+ assert_equal 16 [llength $res]
+ foreach item $res { assert_equal {} $item }
+ } {} {cluster:skip}
+}
+
+ test "SORT GET (key and hash) with sanity check" {
+ set l1 [r sort tosort GET # GET weight_*]
+ set l2 [r sort tosort GET # GET wobj_*->weight]
+ foreach {id1 w1} $l1 {id2 w2} $l2 {
+ assert_equal $id1 $id2
+ assert_equal $w1 [r get weight_$id1]
+ assert_equal $w2 [r get weight_$id1]
+ }
+ } {} {cluster:skip}
+
+ test "SORT BY key STORE" {
+ r sort tosort BY weight_* store sort-res
+ assert_equal $result [r lrange sort-res 0 -1]
+ assert_equal 16 [r llen sort-res]
+ check_sort_store_encoding sort-res
+ } {} {cluster:skip}
+
+ test "SORT BY hash field STORE" {
+ r sort tosort BY wobj_*->weight store sort-res
+ assert_equal $result [r lrange sort-res 0 -1]
+ assert_equal 16 [r llen sort-res]
+ check_sort_store_encoding sort-res
+ } {} {cluster:skip}
+
+ test "SORT extracts STORE correctly" {
+ r command getkeys sort abc store def
+ } {abc def}
+
+ test "SORT_RO get keys" {
+ r command getkeys sort_ro abc
+ } {abc}
+
+ test "SORT extracts multiple STORE correctly" {
+ r command getkeys sort abc store invalid store stillbad store def
+ } {abc def}
+
+ test "SORT DESC" {
+ assert_equal [lsort -decreasing -integer $result] [r sort tosort DESC]
+ }
+
+ test "SORT ALPHA against integer encoded strings" {
+ r del mylist
+ r lpush mylist 2
+ r lpush mylist 1
+ r lpush mylist 3
+ r lpush mylist 10
+ r sort mylist alpha
+ } {1 10 2 3}
+
+ test "SORT sorted set" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ r sort zset alpha desc
+ } {e d c b a}
+
+ test "SORT sorted set BY nosort should retain ordering" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ r multi
+ r sort zset by nosort asc
+ r sort zset by nosort desc
+ r exec
+ } {{a c e b d} {d b e c a}}
+
+ test "SORT sorted set BY nosort + LIMIT" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ assert_equal [r sort zset by nosort asc limit 0 1] {a}
+ assert_equal [r sort zset by nosort desc limit 0 1] {d}
+ assert_equal [r sort zset by nosort asc limit 0 2] {a c}
+ assert_equal [r sort zset by nosort desc limit 0 2] {d b}
+ assert_equal [r sort zset by nosort limit 5 10] {}
+ assert_equal [r sort zset by nosort limit -10 100] {a c e b d}
+ }
+
+ test "SORT sorted set BY nosort works as expected from scripts" {
+ r del zset
+ r zadd zset 1 a
+ r zadd zset 5 b
+ r zadd zset 2 c
+ r zadd zset 10 d
+ r zadd zset 3 e
+ r eval {
+ return {redis.call('sort',KEYS[1],'by','nosort','asc'),
+ redis.call('sort',KEYS[1],'by','nosort','desc')}
+ } 1 zset
+ } {{a c e b d} {d b e c a}}
+
+ test "SORT sorted set: +inf and -inf handling" {
+ r del zset
+ r zadd zset -100 a
+ r zadd zset 200 b
+ r zadd zset -300 c
+ r zadd zset 1000000 d
+ r zadd zset +inf max
+ r zadd zset -inf min
+ r zrange zset 0 -1
+ } {min c a b d max}
+
+ test "SORT regression for issue #19, sorting floats" {
+ r flushdb
+ set floats {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15}
+ foreach x $floats {
+ r lpush mylist $x
+ }
+ assert_equal [lsort -real $floats] [r sort mylist]
+ }
+
+ test "SORT with STORE returns zero if result is empty (github issue 224)" {
+ r flushdb
+ r sort foo{t} store bar{t}
+ } {0}
+
+ test "SORT with STORE does not create empty lists (github issue 224)" {
+ r flushdb
+ r lpush foo{t} bar
+ r sort foo{t} alpha limit 10 10 store zap{t}
+ r exists zap{t}
+ } {0}
+
+ test "SORT with STORE removes key if result is empty (github issue 227)" {
+ r flushdb
+ r lpush foo{t} bar
+ r sort emptylist{t} store foo{t}
+ r exists foo{t}
+ } {0}
+
+ test "SORT with BY <constant> and STORE should still order output" {
+ r del myset mylist
+ r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz
+ r sort myset alpha by _ store mylist
+ r lrange mylist 0 -1
+ } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} {cluster:skip}
+
+ test "SORT will complain with numerical sorting and bad doubles (1)" {
+ r del myset
+ r sadd myset 1 2 3 4 not-a-double
+ set e {}
+ catch {r sort myset} e
+ set e
+ } {*ERR*double*}
+
+ test "SORT will complain with numerical sorting and bad doubles (2)" {
+ r del myset
+ r sadd myset 1 2 3 4
+ r mset score:1 10 score:2 20 score:3 30 score:4 not-a-double
+ set e {}
+ catch {r sort myset by score:*} e
+ set e
+ } {*ERR*double*} {cluster:skip}
+
+ test "SORT BY sub-sorts lexicographically if score is the same" {
+ r del myset
+ r sadd myset a b c d e f g h i l m n o p q r s t u v z aa aaa azz
+ foreach ele {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} {
+ set score:$ele 100
+ }
+ r sort myset by score:*
+ } {a aa aaa azz b c d e f g h i l m n o p q r s t u v z} {cluster:skip}
+
+ test "SORT GET with pattern ending with just -> does not get hash field" {
+ r del mylist
+ r lpush mylist a
+ r set x:a-> 100
+ r sort mylist by num get x:*->
+ } {100} {cluster:skip}
+
+ test "SORT by nosort retains native order for lists" {
+ r del testa
+ r lpush testa 2 1 4 3 5
+ r sort testa by nosort
+ } {5 3 4 1 2} {cluster:skip}
+
+ test "SORT by nosort plus store retains native order for lists" {
+ r del testa
+ r lpush testa 2 1 4 3 5
+ r sort testa by nosort store testb
+ r lrange testb 0 -1
+ } {5 3 4 1 2} {cluster:skip}
+
+ test "SORT by nosort with limit returns based on original list order" {
+ r sort testa by nosort limit 0 3 store testb
+ r lrange testb 0 -1
+ } {5 3 4} {cluster:skip}
+
+ test "SORT_RO - Successful case" {
+ r del mylist
+ r lpush mylist a
+ r set x:a 100
+ r sort_ro mylist by nosort get x:*->
+ } {100} {cluster:skip}
+
+ test "SORT_RO - Cannot run with STORE arg" {
+ catch {r sort_ro foolist STORE bar} e
+ set e
+ } {ERR syntax error}
+
+ tags {"slow"} {
+ set num 100
+ set res [create_random_dataset $num lpush]
+
+ test "SORT speed, $num element list BY key, 100 times" {
+ set start [clock clicks -milliseconds]
+ for {set i 0} {$i < 100} {incr i} {
+ set sorted [r sort tosort BY weight_* LIMIT 0 10]
+ }
+ set elapsed [expr [clock clicks -milliseconds]-$start]
+ if {$::verbose} {
+ puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
+ flush stdout
+ }
+ } {} {cluster:skip}
+
+ test "SORT speed, $num element list BY hash field, 100 times" {
+ set start [clock clicks -milliseconds]
+ for {set i 0} {$i < 100} {incr i} {
+ set sorted [r sort tosort BY wobj_*->weight LIMIT 0 10]
+ }
+ set elapsed [expr [clock clicks -milliseconds]-$start]
+ if {$::verbose} {
+ puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
+ flush stdout
+ }
+ } {} {cluster:skip}
+
+ test "SORT speed, $num element list directly, 100 times" {
+ set start [clock clicks -milliseconds]
+ for {set i 0} {$i < 100} {incr i} {
+ set sorted [r sort tosort LIMIT 0 10]
+ }
+ set elapsed [expr [clock clicks -milliseconds]-$start]
+ if {$::verbose} {
+ puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
+ flush stdout
+ }
+ }
+
+ test "SORT speed, $num element list BY <const>, 100 times" {
+ set start [clock clicks -milliseconds]
+ for {set i 0} {$i < 100} {incr i} {
+ set sorted [r sort tosort BY nokey LIMIT 0 10]
+ }
+ set elapsed [expr [clock clicks -milliseconds]-$start]
+ if {$::verbose} {
+ puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds "
+ flush stdout
+ }
+ } {} {cluster:skip}
+ }
+
+    test {SORT_RO with huge LIMIT offset} {
+        r lpush L 2 1 0
+        # expecting a different outcome on 32 and 64 bit systems
+        foreach value {9223372036854775807 2147483647} {
+            catch {r sort_ro L by a limit 2 $value} res
+            if {![string match "2" $res] && ![string match "*out of range*" $res]} {
+                fail "expected an error or {2}, got: $res"
+            }
+        }
+    }
+}
diff --git a/tests/unit/tls.tcl b/tests/unit/tls.tcl
new file mode 100644
index 0000000..29fe39f
--- /dev/null
+++ b/tests/unit/tls.tcl
@@ -0,0 +1,158 @@
+start_server {tags {"tls"}} {
+ if {$::tls} {
+ package require tls
+
+ test {TLS: Not accepting non-TLS connections on a TLS port} {
+ set s [redis [srv 0 host] [srv 0 port]]
+ catch {$s PING} e
+ set e
+ } {*I/O error*}
+
+ test {TLS: Verify tls-auth-clients behaves as expected} {
+ set s [redis [srv 0 host] [srv 0 port]]
+ ::tls::import [$s channel]
+ catch {$s PING} e
+ assert_match {*error*} $e
+
+ r CONFIG SET tls-auth-clients no
+
+ set s [redis [srv 0 host] [srv 0 port]]
+ ::tls::import [$s channel]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ r CONFIG SET tls-auth-clients optional
+
+ set s [redis [srv 0 host] [srv 0 port]]
+ ::tls::import [$s channel]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ r CONFIG SET tls-auth-clients yes
+
+ set s [redis [srv 0 host] [srv 0 port]]
+ ::tls::import [$s channel]
+ catch {$s PING} e
+ assert_match {*error*} $e
+ }
+
+ test {TLS: Verify tls-protocols behaves as expected} {
+ r CONFIG SET tls-protocols TLSv1.2
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1.2 0}]
+ catch {$s PING} e
+ assert_match {*I/O error*} $e
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-tls1.2 1}]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ r CONFIG SET tls-protocols ""
+ }
+
+ test {TLS: Verify tls-ciphers behaves as expected} {
+ r CONFIG SET tls-protocols TLSv1.2
+ r CONFIG SET tls-ciphers "DEFAULT:-AES128-SHA256"
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "-ALL:AES128-SHA256"}]
+ catch {$s PING} e
+ assert_match {*I/O error*} $e
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "-ALL:AES256-SHA256"}]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ r CONFIG SET tls-ciphers "DEFAULT"
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "-ALL:AES128-SHA256"}]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ r CONFIG SET tls-protocols ""
+ r CONFIG SET tls-ciphers "DEFAULT"
+ }
+
+ test {TLS: Verify tls-prefer-server-ciphers behaves as expected} {
+ r CONFIG SET tls-protocols TLSv1.2
+ r CONFIG SET tls-ciphers "AES128-SHA256:AES256-SHA256"
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "AES256-SHA256:AES128-SHA256"}]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ assert_equal "AES256-SHA256" [dict get [::tls::status [$s channel]] cipher]
+
+ r CONFIG SET tls-prefer-server-ciphers yes
+
+ set s [redis [srv 0 host] [srv 0 port] 0 1 {-cipher "AES256-SHA256:AES128-SHA256"}]
+ catch {$s PING} e
+ assert_match {PONG} $e
+
+ assert_equal "AES128-SHA256" [dict get [::tls::status [$s channel]] cipher]
+
+ r CONFIG SET tls-protocols ""
+ r CONFIG SET tls-ciphers "DEFAULT"
+ }
+
+ test {TLS: Verify tls-cert-file is also used as a client cert if none specified} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ # Use a non-restricted client/server cert for the replica
+ set redis_crt [format "%s/tests/tls/redis.crt" [pwd]]
+ set redis_key [format "%s/tests/tls/redis.key" [pwd]]
+
+ start_server [list overrides [list tls-cert-file $redis_crt tls-key-file $redis_key] \
+ omit [list tls-client-cert-file tls-client-key-file]] {
+ set replica [srv 0 client]
+ $replica replicaof $master_host $master_port
+ wait_for_condition 30 100 {
+ [string match {*master_link_status:up*} [$replica info replication]]
+ } else {
+ fail "Can't authenticate to master using just tls-cert-file!"
+ }
+ }
+ }
+
+ test {TLS: switch between tcp and tls ports} {
+ set srv_port [srv 0 port]
+
+ # TLS
+ set rd [redis [srv 0 host] $srv_port 0 1]
+ $rd PING
+
+ # TCP
+ $rd CONFIG SET tls-port 0
+ $rd CONFIG SET port $srv_port
+ $rd close
+
+ set rd [redis [srv 0 host] $srv_port 0 0]
+ $rd PING
+
+ # TLS
+ $rd CONFIG SET port 0
+ $rd CONFIG SET tls-port $srv_port
+ $rd close
+
+ set rd [redis [srv 0 host] $srv_port 0 1]
+ $rd PING
+ $rd close
+ }
+
+ test {TLS: Working with an encrypted keyfile} {
+ # Create an encrypted version
+ set keyfile [lindex [r config get tls-key-file] 1]
+ set keyfile_encrypted "$keyfile.encrypted"
+ exec -ignorestderr openssl rsa -in $keyfile -out $keyfile_encrypted -aes256 -passout pass:1234 2>/dev/null
+
+ # Using it without a password fails
+ catch {r config set tls-key-file $keyfile_encrypted} e
+ assert_match {*Unable to update TLS*} $e
+
+ # Now use a password
+ r config set tls-key-file-pass 1234
+ r config set tls-key-file $keyfile_encrypted
+ }
+ }
+}
diff --git a/tests/unit/tracking.tcl b/tests/unit/tracking.tcl
new file mode 100644
index 0000000..bea8508
--- /dev/null
+++ b/tests/unit/tracking.tcl
@@ -0,0 +1,902 @@
+# logreqres:skip because it seems many of these tests rely heavily on RESP2
+start_server {tags {"tracking network logreqres:skip"}} {
+ # Create a deferred client we'll use to redirect invalidation
+ # messages to.
+ set rd_redirection [redis_deferring_client]
+ $rd_redirection client id
+ set redir_id [$rd_redirection read]
+ $rd_redirection subscribe __redis__:invalidate
+ $rd_redirection read ; # Consume the SUBSCRIBE reply.
+
+ # Create another client that's not used as a redirection client
+ # We should always keep this client's buffer clean
+ set rd [redis_deferring_client]
+
+ # Client to be used for SET and GET commands
+ # We don't read this client's buffer
+ set rd_sg [redis_client]
+
+ proc clean_all {} {
+ uplevel {
+            # We should turn TRACKING off on r first. If r is in RESP3,
+            # r FLUSHALL will send us tracking-redir-broken or other
+            # push messages which will not be consumed.
+ r CLIENT TRACKING off
+ $rd QUIT
+ $rd_redirection QUIT
+ set rd [redis_deferring_client]
+ set rd_redirection [redis_deferring_client]
+ $rd_redirection client id
+ set redir_id [$rd_redirection read]
+ $rd_redirection subscribe __redis__:invalidate
+ $rd_redirection read ; # Consume the SUBSCRIBE reply.
+ r FLUSHALL
+ r HELLO 2
+ r config set tracking-table-max-keys 1000000
+ }
+ }
+
+ test {Clients are able to enable tracking and redirect it} {
+ r CLIENT TRACKING on REDIRECT $redir_id
+ } {*OK}
+
+ test {The other connection is able to get invalidations} {
+ r SET a{t} 1
+ r SET b{t} 1
+ r GET a{t}
+ r INCR b{t} ; # This key should not be notified, since it wasn't fetched.
+ r INCR a{t}
+ set keys [lindex [$rd_redirection read] 2]
+ assert {[llength $keys] == 1}
+ assert {[lindex $keys 0] eq {a{t}}}
+ }
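+
+    # When invalidation is redirected, it arrives on the redirection client
+    # as a regular Pub/Sub message on __redis__:invalidate, i.e. a 3-element
+    # array {message __redis__:invalidate <keys>}; hence the
+    # [lindex [$rd_redirection read] 2] above to extract the key list.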
+
+ test {The client is now able to disable tracking} {
+ # Make sure to add a few more keys in the tracking list
+ # so that we can check for leaks, as a side effect.
+ r MGET a{t} b{t} c{t} d{t} e{t} f{t} g{t}
+ r CLIENT TRACKING off
+ } {*OK}
+
+ test {Clients can enable the BCAST mode with the empty prefix} {
+ r CLIENT TRACKING on BCAST REDIRECT $redir_id
+ } {*OK*}
+
+ test {The connection gets invalidation messages about all the keys} {
+ r MSET a{t} 1 b{t} 2 c{t} 3
+ set keys [lsort [lindex [$rd_redirection read] 2]]
+ assert {$keys eq {a{t} b{t} c{t}}}
+ }
+
+ test {Clients can enable the BCAST mode with prefixes} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST REDIRECT $redir_id PREFIX a: PREFIX b:
+ r MULTI
+ r INCR a:1{t}
+ r INCR a:2{t}
+ r INCR b:1{t}
+ r INCR b:2{t}
+ # we should not get this key
+ r INCR c:1{t}
+ r EXEC
+        # Because of the internals, we know we are going to receive
+        # two separate notifications for the two different prefixes.
+ set keys1 [lsort [lindex [$rd_redirection read] 2]]
+ set keys2 [lsort [lindex [$rd_redirection read] 2]]
+ set keys [lsort [list {*}$keys1 {*}$keys2]]
+ assert {$keys eq {a:1{t} a:2{t} b:1{t} b:2{t}}}
+ }
+
+ test {Adding prefixes to BCAST mode works} {
+ r CLIENT TRACKING on BCAST REDIRECT $redir_id PREFIX c:
+ r INCR c:1234
+ set keys [lsort [lindex [$rd_redirection read] 2]]
+ assert {$keys eq {c:1234}}
+ }
+
+ test {Tracking NOLOOP mode in standard mode works} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on REDIRECT $redir_id NOLOOP
+ r MGET otherkey1{t} loopkey{t} otherkey2{t}
+ $rd_sg SET otherkey1{t} 1; # We should get this
+ r SET loopkey{t} 1 ; # We should not get this
+ $rd_sg SET otherkey2{t} 1; # We should get this
+        # Because of the internals, we know we are going to receive
+        # two separate notifications for the two different keys.
+ set keys1 [lsort [lindex [$rd_redirection read] 2]]
+ set keys2 [lsort [lindex [$rd_redirection read] 2]]
+ set keys [lsort [list {*}$keys1 {*}$keys2]]
+ assert {$keys eq {otherkey1{t} otherkey2{t}}}
+ }
+
+ test {Tracking NOLOOP mode in BCAST mode works} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST REDIRECT $redir_id NOLOOP
+ $rd_sg SET otherkey1 1; # We should get this
+ r SET loopkey 1 ; # We should not get this
+ $rd_sg SET otherkey2 1; # We should get this
+        # Because $rd_sg sends commands synchronously, we know we are
+        # going to receive two separate notifications.
+ set keys1 [lsort [lindex [$rd_redirection read] 2]]
+ set keys2 [lsort [lindex [$rd_redirection read] 2]]
+ set keys [lsort [list {*}$keys1 {*}$keys2]]
+ assert {$keys eq {otherkey1 otherkey2}}
+ }
+
+ test {Tracking gets notification of expired keys} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST REDIRECT $redir_id NOLOOP
+ r SET mykey myval px 1
+ r SET mykeyotherkey myval ; # We should not get it
+ after 1000
+ set keys [lsort [lindex [$rd_redirection read] 2]]
+ assert {$keys eq {mykey}}
+ }
+
+ test {Tracking gets notification of lazy expired keys} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST REDIRECT $redir_id NOLOOP
+        # Use multi-exec to expose a race where the key gets two invalidations
+        # in the same event loop: once by the client (and thus filtered by NOLOOP),
+        # and a second one by the lazy expire
+ r MULTI
+ r SET mykey{t} myval px 1
+ r SET mykeyotherkey{t} myval ; # We should not get it
+ r DEBUG SLEEP 0.1
+ r GET mykey{t}
+ r EXEC
+ set keys [lsort [lindex [$rd_redirection read] 2]]
+ assert {$keys eq {mykey{t}}}
+ } {} {needs:debug}
+
+ test {HELLO 3 reply is correct} {
+ set reply [r HELLO 3]
+ assert_equal [dict get $reply proto] 3
+ }
+
+ test {HELLO without protover} {
+ set reply [r HELLO 3]
+ assert_equal [dict get $reply proto] 3
+
+ set reply [r HELLO]
+ assert_equal [dict get $reply proto] 3
+
+ set reply [r HELLO 2]
+ assert_equal [dict get $reply proto] 2
+
+ set reply [r HELLO]
+ assert_equal [dict get $reply proto] 2
+
+ # restore RESP3 for next test
+ r HELLO 3
+ }
+
+ test {RESP3 based basic invalidation} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg SET key1 2
+ r read
+ } {invalidate key1}
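+
+    # In RESP3 without REDIRECT, invalidations are delivered in-band as push
+    # messages of the form {invalidate <keys>} on the tracking connection
+    # itself, which is why a plain [r read] picks one up above.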
+
+ test {RESP3 tracking redirection} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg SET key1 2
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key1}}
+ }
+
+ test {Invalidations of previous keys can be redirected after switching to RESP3} {
+ r HELLO 2
+ $rd_sg SET key1 1
+ r GET key1
+ r HELLO 3
+ $rd_sg SET key1 2
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key1}}
+ }
+
+ test {Invalidations of new keys can be redirected after switching to RESP3} {
+ r HELLO 3
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg SET key1 2
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key1}}
+ }
+
+ test {Invalid keys should not be tracked for scripts in NOLOOP mode} {
+ $rd_sg CLIENT TRACKING off
+ $rd_sg CLIENT TRACKING on NOLOOP
+ $rd_sg HELLO 3
+ $rd_sg SET key1 1
+ assert_equal "1" [$rd_sg GET key1]
+
+        # For a write command in a script, the key should not be tracked with the NOLOOP flag
+ $rd_sg eval "return redis.call('set', 'key1', '2')" 1 key1
+ assert_equal "2" [$rd_sg GET key1]
+ $rd_sg CLIENT TRACKING off
+ }
+
+    test {Tracking only occurs for scripts when the script calls a read-only command} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on
+        $rd_sg MSET key1{t} 1 key2{t} 1
+
+ # If a script doesn't call any read command, don't track any keys
+ r EVAL "redis.call('set', 'key3{t}', 'bar')" 2 key1{t} key2{t}
+ $rd_sg MSET key2{t} 2 key1{t} 2
+ assert_equal "PONG" [r ping]
+
+        # If a script calls a read command, track just the read keys
+ r EVAL "redis.call('get', 'key2{t}')" 2 key1{t} key2{t}
+ $rd_sg MSET key2{t} 2 key3{t} 2
+ assert_equal {invalidate key2{t}} [r read]
+ assert_equal "PONG" [r ping]
+
+ # RO variants work like the normal variants
+
+ # If a RO script doesn't call any read command, don't track any keys
+ r EVAL_RO "redis.call('ping')" 2 key1{t} key2{t}
+ $rd_sg MSET key2{t} 2 key1{t} 2
+ assert_equal "PONG" [r ping]
+
+        # If a RO script calls a read command, track just the read keys
+ r EVAL_RO "redis.call('get', 'key2{t}')" 2 key1{t} key2{t}
+ $rd_sg MSET key2{t} 2 key3{t} 2
+ assert_equal {invalidate key2{t}} [r read]
+ assert_equal "PONG" [r ping]
+ }
+
+    test {RESP3 client gets tracking-redir-broken push message after cached key changed when redirection client is terminated} {
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_redirection QUIT
+ assert_equal OK [$rd_redirection read]
+ $rd_sg SET key1 2
+ set MAX_TRIES 100
+ set res -1
+ for {set i 0} {$i <= $MAX_TRIES && $res < 0} {incr i} {
+ set res [lsearch -exact [r PING] "tracking-redir-broken"]
+ }
+ assert {$res >= 0}
+ # Consume PING reply
+ assert_equal PONG [r read]
+
+        # Reinstantiate the redirection client after the QUIT above
+ set rd_redirection [redis_deferring_client]
+ $rd_redirection CLIENT ID
+ set redir_id [$rd_redirection read]
+ $rd_redirection SUBSCRIBE __redis__:invalidate
+ $rd_redirection read ; # Consume the SUBSCRIBE reply
+ }
+
+ test {Different clients can redirect to the same connection} {
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd CLIENT TRACKING on REDIRECT $redir_id
+ assert_equal OK [$rd read] ; # Consume the TRACKING reply
+ $rd_sg MSET key1{t} 1 key2{t} 1
+ r GET key1{t}
+ $rd GET key2{t}
+ assert_equal 1 [$rd read] ; # Consume the GET reply
+ $rd_sg INCR key1{t}
+ $rd_sg INCR key2{t}
+ set res1 [lindex [$rd_redirection read] 2]
+ set res2 [lindex [$rd_redirection read] 2]
+ assert {$res1 eq {key1{t}}}
+ assert {$res2 eq {key2{t}}}
+ }
+
+ test {Different clients using different protocols can track the same key} {
+ $rd HELLO 3
+ set reply [$rd read] ; # Consume the HELLO reply
+ assert_equal 3 [dict get $reply proto]
+ $rd CLIENT TRACKING on
+ assert_equal OK [$rd read] ; # Consume the TRACKING reply
+ $rd_sg set key1 1
+ r GET key1
+ $rd GET key1
+ assert_equal 1 [$rd read] ; # Consume the GET reply
+ $rd_sg INCR key1
+ set res1 [lindex [$rd_redirection read] 2]
+        $rd PING ; # The non-redirecting client has to talk to the server in order to get the invalidation message
+ set res2 [lindex [split [$rd read] " "] 1]
+ assert_equal PONG [$rd read] ; # Consume the PING reply, which comes together with the invalidation message
+ assert {$res1 eq {key1}}
+ assert {$res2 eq {key1}}
+ }
+
+ test {No invalidation message when using OPTIN option} {
+ r CLIENT TRACKING on OPTIN REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1 ; # This key should not be notified, since OPTIN is on and CLIENT CACHING yes wasn't called
+ $rd_sg SET key1 2
+        # Prepare a message to consume on $rd_redirection so we don't block
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key2 1
+ r GET key2 ; # This key should be notified
+ $rd_sg SET key2 2
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key2}}
+ }
+
+ test {Invalidation message sent when using OPTIN option with CLIENT CACHING yes} {
+ r CLIENT TRACKING on OPTIN REDIRECT $redir_id
+ $rd_sg SET key1 3
+ r CLIENT CACHING yes
+ r GET key1
+ $rd_sg SET key1 4
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key1}}
+ }
+
+ test {Invalidation message sent when using OPTOUT option} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on OPTOUT REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg SET key1 2
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key1}}
+ }
+
+ test {No invalidation message when using OPTOUT option with CLIENT CACHING no} {
+ $rd_sg SET key1 1
+ r CLIENT CACHING no
+ r GET key1 ; # This key should not be notified, since OPTOUT is on and CLIENT CACHING no was called
+ $rd_sg SET key1 2
+        # Prepare a message to consume on $rd_redirection so we don't block
+ $rd_sg SET key2 1
+ r GET key2 ; # This key should be notified
+ $rd_sg SET key2 2
+ set res [lindex [$rd_redirection read] 2]
+ assert {$res eq {key2}}
+ }
+
+ test {Able to redirect to a RESP3 client} {
+ $rd_redirection UNSUBSCRIBE __redis__:invalidate ; # Need to unsub first before we can do HELLO 3
+ set res [$rd_redirection read] ; # Consume the UNSUBSCRIBE reply
+ assert_equal {__redis__:invalidate} [lindex $res 1]
+ $rd_redirection HELLO 3
+ set res [$rd_redirection read] ; # Consume the HELLO reply
+        assert_equal [dict get $res proto] 3
+ $rd_redirection SUBSCRIBE __redis__:invalidate
+ set res [$rd_redirection read] ; # Consume the SUBSCRIBE reply
+ assert_equal {__redis__:invalidate} [lindex $res 1]
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg INCR key1
+ set res [lindex [$rd_redirection read] 1]
+ assert {$res eq {key1}}
+ $rd_redirection HELLO 2
+ set res [$rd_redirection read] ; # Consume the HELLO reply
+ assert_equal [dict get $res proto] 2
+ }
+
+ test {After switching from normal tracking to BCAST mode, no invalidation message is produced for pre-BCAST keys} {
+ r CLIENT TRACKING off
+ r HELLO 3
+ r CLIENT TRACKING on
+ $rd_sg SET key1 1
+ r GET key1
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST
+ $rd_sg INCR key1
+ set inv_msg [r PING]
+ set ping_reply [r read]
+ assert {$inv_msg eq {invalidate key1}}
+ assert {$ping_reply eq {PONG}}
+ }
+
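+    # A note on the behavior verified below (a rough rationale): overlapping
+    # prefixes are rejected because a key such as FOOBAR1 would match both
+    # FOO and FOOBAR at once, forcing the server to emit duplicate
+    # invalidations for the same key, so registration fails up front.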
+ test {BCAST with prefix collisions throw errors} {
+ set r [redis_client]
+ catch {$r CLIENT TRACKING ON BCAST PREFIX FOOBAR PREFIX FOO} output
+ assert_match {ERR Prefix 'FOOBAR'*'FOO'*} $output
+
+ catch {$r CLIENT TRACKING ON BCAST PREFIX FOO PREFIX FOOBAR} output
+ assert_match {ERR Prefix 'FOO'*'FOOBAR'*} $output
+
+ $r CLIENT TRACKING ON BCAST PREFIX FOO PREFIX BAR
+ catch {$r CLIENT TRACKING ON BCAST PREFIX FO} output
+ assert_match {ERR Prefix 'FO'*'FOO'*} $output
+
+ catch {$r CLIENT TRACKING ON BCAST PREFIX BARB} output
+ assert_match {ERR Prefix 'BARB'*'BAR'*} $output
+
+ $r CLIENT TRACKING OFF
+ }
+
+    test {HDEL delivers the invalidate message after the response in the same connection} {
+ r CLIENT TRACKING off
+ r HELLO 3
+ r CLIENT TRACKING on
+ r HSET myhash f 1
+ r HGET myhash f
+ set res [r HDEL myhash f]
+ assert_equal $res 1
+ set res [r read]
+ assert_equal $res {invalidate myhash}
+ }
+
+ test {Tracking invalidation message is not interleaved with multiple keys response} {
+ r CLIENT TRACKING off
+ r HELLO 3
+ r CLIENT TRACKING on
+        # We need to disable active expiry so that we can trigger lazy expiry
+ r DEBUG SET-ACTIVE-EXPIRE 0
+ r MULTI
+ r MSET x{t} 1 y{t} 2
+ r PEXPIRE y{t} 100
+ r GET y{t}
+ r EXEC
+ after 110
+        # Reading the expired key y{t} generates an invalidate message for it
+ set res [r MGET x{t} y{t}]
+ assert_equal $res {1 {}}
+ # Consume the invalidate message which is after command response
+ set res [r read]
+ assert_equal $res {invalidate y{t}}
+ r DEBUG SET-ACTIVE-EXPIRE 1
+ } {OK} {needs:debug}
+
+ test {Tracking invalidation message is not interleaved with transaction response} {
+ r CLIENT TRACKING off
+ r HELLO 3
+ r CLIENT TRACKING on
+ r MSET a{t} 1 b{t} 2
+ r GET a{t}
+ # Start a transaction, make a{t} generate an invalidate message
+ r MULTI
+ r INCR a{t}
+ r GET b{t}
+ set res [r EXEC]
+ assert_equal $res {2 2}
+ set res [r read]
+ # Consume the invalidate message which is after command response
+ assert_equal $res {invalidate a{t}}
+ }
+
+ test {Tracking invalidation message of eviction keys should be before response} {
+ # Get the current memory limit and calculate a new limit.
+ r CLIENT TRACKING off
+ r HELLO 3
+ r CLIENT TRACKING on
+
+        # Make sure the previous test has really finished before sampling used_memory
+ wait_lazyfree_done r
+
+ set used [expr {[s used_memory] - [s mem_not_counted_for_evict]}]
+ set limit [expr {$used+100*1024}]
+ set old_policy [lindex [r config get maxmemory-policy] 1]
+ r config set maxmemory $limit
+        # We set the policy to volatile-random, so only keys with a TTL will be evicted
+        r config set maxmemory-policy volatile-random
+        # Add a volatile key and track it.
+ r setex volatile-key 10000 x
+ r get volatile-key
+        # We use SETBIT here so we can create a big key and push used_memory
+        # above maxmemory; the next command will then evict volatile keys.
+        # We can't use SET, since SET needs a big input buffer and would
+        # itself fail.
+        r setbit big-key 1600000 0 ;# 1600000 bits / 8 = ~200kb
+ # volatile-key is evicted before response.
+ set res [r getbit big-key 0]
+ assert_equal $res {invalidate volatile-key}
+ set res [r read]
+ assert_equal $res 0
+ r config set maxmemory-policy $old_policy
+ r config set maxmemory 0
+ }
+
+ test {Unblocked BLMOVE gets notification after response} {
+ r RPUSH list2{t} a
+ $rd HELLO 3
+ $rd read
+ $rd CLIENT TRACKING on
+ $rd read
+ # Tracking key list2{t}
+ $rd LRANGE list2{t} 0 -1
+ $rd read
+ # We block on list1{t}
+ $rd BLMOVE list1{t} list2{t} left left 0
+ wait_for_blocked_clients_count 1
+        # Unblock $rd: list2{t} gets an element, which generates an invalidation message
+ r rpush list1{t} foo
+ assert_equal [$rd read] {foo}
+ assert_equal [$rd read] {invalidate list2{t}}
+ }
+
+ test {Tracking gets notification on tracking table key eviction} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on REDIRECT $redir_id NOLOOP
+ r MSET key1{t} 1 key2{t} 2
+ # Let the server track the two keys for us
+ r MGET key1{t} key2{t}
+ # Force the eviction of all the keys but one:
+ r config set tracking-table-max-keys 1
+        # Note that we may have other keys in the table for this client,
+        # since we disabled/enabled tracking multiple times with the same
+        # ID, and tracking does not do ID cleanups for performance reasons.
+        # So we check that eventually we'll receive one key or the other;
+        # otherwise the test will die on a timeout.
+ while 1 {
+ set keys [lindex [$rd_redirection read] 2]
+ if {$keys eq {key1{t}} || $keys eq {key2{t}}} break
+ }
+        # We should receive an eviction notification for one of
+        # the two keys (only one may remain)
+ assert {$keys eq {key1{t}} || $keys eq {key2{t}}}
+ }
+
+ test {Invalidation message received for flushall} {
+ clean_all
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg FLUSHALL
+ set msg [$rd_redirection read]
+        assert {[lindex $msg 2] eq {}}
+ }
+
+ test {Invalidation message received for flushdb} {
+ clean_all
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_sg FLUSHDB
+ set msg [$rd_redirection read]
+        assert {[lindex $msg 2] eq {}}
+ }
+
+ test {Test ASYNC flushall} {
+ clean_all
+ r CLIENT TRACKING on REDIRECT $redir_id
+ r GET key1
+ r GET key2
+ assert_equal [s 0 tracking_total_keys] 2
+ $rd_sg FLUSHALL ASYNC
+ assert_equal [s 0 tracking_total_keys] 0
+ assert_equal [lindex [$rd_redirection read] 2] {}
+ }
+
+ test {flushdb tracking invalidation message is not interleaved with transaction response} {
+ clean_all
+ r HELLO 3
+ r CLIENT TRACKING on
+ r SET a{t} 1
+ r GET a{t}
+ r MULTI
+ r FLUSHDB
+ set res [r EXEC]
+ assert_equal $res {OK}
+ # Consume the invalidate message which is after command response
+ r read
+ } {invalidate {}}
+
+    # Keys are evicted from the tracking table 100 at a time by default.
+    # If, after an eviction pass, the number of keys still surpasses the
+    # limit defined by tracking-table-max-keys, we increase the eviction
+    # effort to 200, then 300, and so on.
+    # This test verifies that effort escalation.
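+    # Roughly, with the numbers used below (250 keys, a limit of 1, and an
+    # assumed initial effort of 100): the first pass evicts 100 keys, 150
+    # remain and still surpass the limit, so the effort grows to 200 for
+    # the next pass, and so on until the table fits under the limit.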
+    test {Server is able to evict enough keys when the number of keys surpasses the limit by more than the defined initial effort} {
+ clean_all
+ set NUM_OF_KEYS_TO_TEST 250
+ set TRACKING_TABLE_MAX_KEYS 1
+ r CLIENT TRACKING on REDIRECT $redir_id
+ for {set i 0} {$i < $NUM_OF_KEYS_TO_TEST} {incr i} {
+ $rd_sg SET key$i $i
+ r GET key$i
+ }
+ r config set tracking-table-max-keys $TRACKING_TABLE_MAX_KEYS
+ # If not enough keys are evicted, we won't get enough invalidation
+ # messages, and "$rd_redirection read" will block.
+ # If too many keys are evicted, we will get too many invalidation
+ # messages, and the assert will fail.
+ for {set i 0} {$i < $NUM_OF_KEYS_TO_TEST - $TRACKING_TABLE_MAX_KEYS} {incr i} {
+ $rd_redirection read
+ }
+ $rd_redirection PING
+ assert {[$rd_redirection read] eq {pong {}}}
+ }
+
+ test {Tracking info is correct} {
+ clean_all
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ $rd_sg SET key2 2
+ r GET key1
+ r GET key2
+ $rd CLIENT TRACKING on BCAST PREFIX prefix:
+ assert [string match *OK* [$rd read]]
+ $rd_sg SET prefix:key1 1
+ $rd_sg SET prefix:key2 2
+ set info [r info]
+ regexp "\r\ntracking_total_items:(.*?)\r\n" $info _ total_items
+ regexp "\r\ntracking_total_keys:(.*?)\r\n" $info _ total_keys
+ regexp "\r\ntracking_total_prefixes:(.*?)\r\n" $info _ total_prefixes
+ regexp "\r\ntracking_clients:(.*?)\r\n" $info _ tracking_clients
+ assert {$total_items == 2}
+ assert {$total_keys == 2}
+ assert {$total_prefixes == 1}
+ assert {$tracking_clients == 2}
+ }
+
+ test {CLIENT GETREDIR provides correct client id} {
+ set res [r CLIENT GETREDIR]
+ assert_equal $redir_id $res
+ r CLIENT TRACKING off
+ set res [r CLIENT GETREDIR]
+ assert_equal -1 $res
+ r CLIENT TRACKING on
+ set res [r CLIENT GETREDIR]
+ assert_equal 0 $res
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking off} {
+ r CLIENT TRACKING off
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {off} $flags
+ set redirect [dict get $res redirect]
+ assert_equal {-1} $redirect
+ set prefixes [dict get $res prefixes]
+ assert_equal {} $prefixes
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking on} {
+ r CLIENT TRACKING on
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on} $flags
+ set redirect [dict get $res redirect]
+ assert_equal {0} $redirect
+ set prefixes [dict get $res prefixes]
+ assert_equal {} $prefixes
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking on with options} {
+ r CLIENT TRACKING on REDIRECT $redir_id noloop
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on noloop} $flags
+ set redirect [dict get $res redirect]
+ assert_equal $redir_id $redirect
+ set prefixes [dict get $res prefixes]
+ assert_equal {} $prefixes
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking optin} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on optin
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on optin} $flags
+ set redirect [dict get $res redirect]
+ assert_equal {0} $redirect
+ set prefixes [dict get $res prefixes]
+ assert_equal {} $prefixes
+
+ r CLIENT CACHING yes
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on optin caching-yes} $flags
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking optout} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on optout
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on optout} $flags
+ set redirect [dict get $res redirect]
+ assert_equal {0} $redirect
+ set prefixes [dict get $res prefixes]
+ assert_equal {} $prefixes
+
+ r CLIENT CACHING no
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on optout caching-no} $flags
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking bcast mode} {
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST PREFIX foo PREFIX bar
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on bcast} $flags
+ set redirect [dict get $res redirect]
+ assert_equal {0} $redirect
+ set prefixes [lsort [dict get $res prefixes]]
+ assert_equal {bar foo} $prefixes
+
+ r CLIENT TRACKING off
+ r CLIENT TRACKING on BCAST
+ set res [r client trackinginfo]
+ set prefixes [dict get $res prefixes]
+ assert_equal {{}} $prefixes
+ }
+
+ test {CLIENT TRACKINGINFO provides reasonable results when tracking redir broken} {
+ clean_all
+ r HELLO 3
+ r CLIENT TRACKING on REDIRECT $redir_id
+ $rd_sg SET key1 1
+ r GET key1
+ $rd_redirection QUIT
+ assert_equal OK [$rd_redirection read]
+ $rd_sg SET key1 2
+ set res [lsearch -exact [r read] "tracking-redir-broken"]
+ assert {$res >= 0}
+ set res [r client trackinginfo]
+ set flags [dict get $res flags]
+ assert_equal {on broken_redirect} $flags
+ set redirect [dict get $res redirect]
+ assert_equal $redir_id $redirect
+ set prefixes [dict get $res prefixes]
+ assert_equal {} $prefixes
+ }
+
+ test {Regression test for #11715} {
+        # This issue manifests when invalidations triggered by the max key
+        # limit (which evicts tracked keys to get Redis below the limit) are
+        # not followed by any executed command. This can occur in several
+        # ways, but the simplest is through MULTI/EXEC, which queues commands.
+ clean_all
+ r config set tracking-table-max-keys 2
+
+ # The cron will invalidate keys if we're above the limit, so disable it.
+ r debug pause-cron 1
+
+ # Set up a client that has listened to 2 keys and start a multi, this
+ # sets up the crash for later.
+ $rd HELLO 3
+ $rd read
+ $rd CLIENT TRACKING on
+ assert_match "OK" [$rd read]
+ $rd mget "1{tag}" "2{tag}"
+ assert_match "{} {}" [$rd read]
+ $rd multi
+ assert_match "OK" [$rd read]
+
+        # Reduce the tracking table limit to 1 key. This doesn't take effect
+        # immediately, but will instead apply on the next command.
+ r config set tracking-table-max-keys 1
+
+ # This command will get queued, so make sure this command doesn't crash.
+ $rd ping
+ $rd exec
+
+ # Validate we got some invalidation message and then the command was queued.
+ assert_match "invalidate *{tag}" [$rd read]
+ assert_match "QUEUED" [$rd read]
+ assert_match "PONG" [$rd read]
+
+ r debug pause-cron 0
+ } {OK} {needs:debug}
+
+ foreach resp {3 2} {
+ test "RESP$resp based basic invalidation with client reply off" {
+ # This entire test is mostly irrelevant for RESP2, but we run it anyway just for some extra coverage.
+ clean_all
+
+ $rd hello $resp
+ $rd read
+ $rd client tracking on
+ $rd read
+
+ $rd_sg set foo bar
+ $rd get foo
+ $rd read
+
+ $rd client reply off
+
+ $rd_sg set foo bar2
+
+ if {$resp == 3} {
+ assert_equal {invalidate foo} [$rd read]
+ } elseif {$resp == 2} { } ;# Just coverage
+
+ # Verify things didn't get messed up and no unexpected reply was pushed to the client.
+ $rd client reply on
+ assert_equal {OK} [$rd read]
+ $rd ping
+ assert_equal {PONG} [$rd read]
+ }
+ }
+
+ test {RESP3 based basic redirect invalidation with client reply off} {
+ clean_all
+
+ set rd_redir [redis_deferring_client]
+ $rd_redir hello 3
+ $rd_redir read
+
+ $rd_redir client id
+ set rd_redir_id [$rd_redir read]
+
+ $rd client tracking on redirect $rd_redir_id
+ $rd read
+
+ $rd_sg set foo bar
+ $rd get foo
+ $rd read
+
+ $rd_redir client reply off
+
+ $rd_sg set foo bar2
+ assert_equal {invalidate foo} [$rd_redir read]
+
+ # Verify things didn't get messed up and no unexpected reply was pushed to the client.
+ $rd_redir client reply on
+ assert_equal {OK} [$rd_redir read]
+ $rd_redir ping
+ assert_equal {PONG} [$rd_redir read]
+
+ $rd_redir close
+ }
+
+ test {RESP3 based basic tracking-redir-broken with client reply off} {
+ clean_all
+
+ $rd hello 3
+ $rd read
+ $rd client tracking on redirect $redir_id
+ $rd read
+
+ $rd_sg set foo bar
+ $rd get foo
+ $rd read
+
+ $rd client reply off
+
+ $rd_redirection quit
+ $rd_redirection read
+
+ $rd_sg set foo bar2
+
+ set res [lsearch -exact [$rd read] "tracking-redir-broken"]
+ assert_morethan_equal $res 0
+
+ # Verify things didn't get messed up and no unexpected reply was pushed to the client.
+ $rd client reply on
+ assert_equal {OK} [$rd read]
+ $rd ping
+ assert_equal {PONG} [$rd read]
+ }
+
+ $rd_redirection close
+ $rd_sg close
+ $rd close
+}
+
+# Just some extra coverage for --log-req-res, because we do not
+# run the full tracking unit in that mode
+start_server {tags {"tracking network"}} {
+ test {Coverage: Basic CLIENT CACHING} {
+ set rd_redirection [redis_deferring_client]
+ $rd_redirection client id
+ set redir_id [$rd_redirection read]
+ assert_equal {OK} [r CLIENT TRACKING on OPTIN REDIRECT $redir_id]
+ assert_equal {OK} [r CLIENT CACHING yes]
+ r CLIENT TRACKING off
+ } {OK}
+
+ test {Coverage: Basic CLIENT REPLY} {
+ r CLIENT REPLY on
+ } {OK}
+
+ test {Coverage: Basic CLIENT TRACKINGINFO} {
+ r CLIENT TRACKINGINFO
+ } {flags off redirect -1 prefixes {}}
+
+ test {Coverage: Basic CLIENT GETREDIR} {
+ r CLIENT GETREDIR
+ } {-1}
+}
diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl
new file mode 100644
index 0000000..2a26f44
--- /dev/null
+++ b/tests/unit/type/hash.tcl
@@ -0,0 +1,846 @@
+start_server {tags {"hash"}} {
+ test {HSET/HLEN - Small hash creation} {
+ array set smallhash {}
+ for {set i 0} {$i < 8} {incr i} {
+ set key __avoid_collisions__[randstring 0 8 alpha]
+ set val __avoid_collisions__[randstring 0 8 alpha]
+ if {[info exists smallhash($key)]} {
+ incr i -1
+ continue
+ }
+ r hset smallhash $key $val
+ set smallhash($key) $val
+ }
+ list [r hlen smallhash]
+ } {8}
+
+ test {Is the small hash encoded with a listpack?} {
+ assert_encoding listpack smallhash
+ }
+
+ proc create_hash {key entries} {
+ r del $key
+ foreach entry $entries {
+ r hset $key [lindex $entry 0] [lindex $entry 1]
+ }
+ }
+
+ proc get_keys {l} {
+ set res {}
+ foreach entry $l {
+ set key [lindex $entry 0]
+ lappend res $key
+ }
+ return $res
+ }
+
+ foreach {type contents} "listpack {{a 1} {b 2} {c 3}} hashtable {{a 1} {b 2} {[randstring 70 90 alpha] 3}}" {
+ set original_max_value [lindex [r config get hash-max-ziplist-value] 1]
+ r config set hash-max-ziplist-value 10
+ create_hash myhash $contents
+ assert_encoding $type myhash
+
+ # coverage for objectComputeSize
+ assert_morethan [memory_usage myhash] 0
+
+ test "HRANDFIELD - $type" {
+ unset -nocomplain myhash
+ array set myhash {}
+ for {set i 0} {$i < 100} {incr i} {
+ set key [r hrandfield myhash]
+ set myhash($key) 1
+ }
+ assert_equal [lsort [get_keys $contents]] [lsort [array names myhash]]
+ }
+ r config set hash-max-ziplist-value $original_max_value
+ }
+
+ test "HRANDFIELD with RESP3" {
+ r hello 3
+ set res [r hrandfield myhash 3 withvalues]
+ assert_equal [llength $res] 3
+ assert_equal [llength [lindex $res 1]] 2
+
+ set res [r hrandfield myhash 3]
+ assert_equal [llength $res] 3
+ assert_equal [llength [lindex $res 1]] 1
+ r hello 2
+ }
+
+ test "HRANDFIELD count of 0 is handled correctly" {
+ r hrandfield myhash 0
+ } {}
+
+ test "HRANDFIELD count overflow" {
+ r hmset myhash a 1
+ assert_error {*value is out of range*} {r hrandfield myhash -9223372036854770000 withvalues}
+ assert_error {*value is out of range*} {r hrandfield myhash -9223372036854775808 withvalues}
+ assert_error {*value is out of range*} {r hrandfield myhash -9223372036854775808}
+ } {}
+
+ test "HRANDFIELD with <count> against non existing key" {
+ r hrandfield nonexisting_key 100
+ } {}
+
+ # Make sure we can distinguish between an empty array and a null response
+ r readraw 1
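+    # (For reference: in raw RESP an empty array arrives as the header "*0",
+    # while a null array would be "*-1" in RESP2 or "_" in RESP3.)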
+
+ test "HRANDFIELD count of 0 is handled correctly - emptyarray" {
+ r hrandfield myhash 0
+ } {*0}
+
+ test "HRANDFIELD with <count> against non existing key - emptyarray" {
+ r hrandfield nonexisting_key 100
+ } {*0}
+
+ r readraw 0
+
+ foreach {type contents} "
+ hashtable {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {[randstring 70 90 alpha] 10}}
+ listpack {{a 1} {b 2} {c 3} {d 4} {e 5} {6 f} {7 g} {8 h} {9 i} {10 j}} " {
+ test "HRANDFIELD with <count> - $type" {
+ set original_max_value [lindex [r config get hash-max-ziplist-value] 1]
+ r config set hash-max-ziplist-value 10
+ create_hash myhash $contents
+ assert_encoding $type myhash
+
+ # create a dict for easy lookup
+ set mydict [dict create {*}[r hgetall myhash]]
+
+            # We'll stress different parts of the code; see the implementation
+            # of HRANDFIELD for more information, but basically there are
+            # four different code paths.
+
+ # PATH 1: Use negative count.
+
+ # 1) Check that it returns repeated elements with and without values.
+ set res [r hrandfield myhash -20]
+ assert_equal [llength $res] 20
+ set res [r hrandfield myhash -1001]
+ assert_equal [llength $res] 1001
+ # again with WITHVALUES
+ set res [r hrandfield myhash -20 withvalues]
+ assert_equal [llength $res] 40
+ set res [r hrandfield myhash -1001 withvalues]
+ assert_equal [llength $res] 2002
+
+ # Test random uniform distribution
+ # df = 9, 40 means 0.00001 probability
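+            # (chi_square_value comes from the test helpers and computes
+            # sum((observed - expected)^2 / expected) over the returned
+            # fields; with 10 fields df = 9, and under a uniform distribution
+            # a value above ~40 has probability around 0.00001.)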
+ set res [r hrandfield myhash -1000]
+ assert_lessthan [chi_square_value $res] 40
+
+ # 2) Check that all the elements actually belong to the original hash.
+ foreach {key val} $res {
+ assert {[dict exists $mydict $key]}
+ }
+
+ # 3) Check that eventually all the elements are returned.
+ # Use both WITHVALUES and without
+ unset -nocomplain auxset
+ set iterations 1000
+ while {$iterations != 0} {
+ incr iterations -1
+ if {[expr {$iterations % 2}] == 0} {
+ set res [r hrandfield myhash -3 withvalues]
+ foreach {key val} $res {
+ dict append auxset $key $val
+ }
+ } else {
+ set res [r hrandfield myhash -3]
+ foreach key $res {
+                        dict append auxset $key
+ }
+ }
+ if {[lsort [dict keys $mydict]] eq
+ [lsort [dict keys $auxset]]} {
+ break;
+ }
+ }
+ assert {$iterations != 0}
+
+        # PATH 2: positive count (unique behavior) with a requested size
+        # equal to or greater than the hash size.
+ foreach size {10 20} {
+ set res [r hrandfield myhash $size]
+ assert_equal [llength $res] 10
+ assert_equal [lsort $res] [lsort [dict keys $mydict]]
+
+ # again with WITHVALUES
+ set res [r hrandfield myhash $size withvalues]
+ assert_equal [llength $res] 20
+ assert_equal [lsort $res] [lsort $mydict]
+ }
+
+        # PATH 3: Ask for almost as many elements as there are in the
+        # hash. In this case the implementation will duplicate the
+        # original hash and will remove random elements up to the
+        # requested size.
+        #
+        # PATH 4: Ask for a number of elements definitely smaller than
+        # the hash size.
+        #
+        # We can test both code paths with the same code, just by
+        # changing the requested size.
+ foreach size {8 2} {
+ set res [r hrandfield myhash $size]
+ assert_equal [llength $res] $size
+ # again with WITHVALUES
+ set res [r hrandfield myhash $size withvalues]
+ assert_equal [llength $res] [expr {$size * 2}]
+
+            # 1) Check that all the elements actually belong to the
+            #    original hash.
+ foreach ele [dict keys $res] {
+ assert {[dict exists $mydict $ele]}
+ }
+
+ # 2) Check that eventually all the elements are returned.
+ # Use both WITHVALUES and without
+ unset -nocomplain auxset
+ unset -nocomplain allkey
+ set iterations [expr {1000 / $size}]
+ set all_ele_return false
+ while {$iterations != 0} {
+ incr iterations -1
+ if {[expr {$iterations % 2}] == 0} {
+ set res [r hrandfield myhash $size withvalues]
+ foreach {key value} $res {
+ dict append auxset $key $value
+ lappend allkey $key
+ }
+ } else {
+ set res [r hrandfield myhash $size]
+ foreach key $res {
+ dict append auxset $key
+ lappend allkey $key
+ }
+ }
+ if {[lsort [dict keys $mydict]] eq
+ [lsort [dict keys $auxset]]} {
+ set all_ele_return true
+ }
+ }
+ assert_equal $all_ele_return true
+ # df = 9, 40 means 0.00001 probability
+ assert_lessthan [chi_square_value $allkey] 40
+ }
+ }
+ r config set hash-max-ziplist-value $original_max_value
+ }
+
+
+ test {HSET/HLEN - Big hash creation} {
+ array set bighash {}
+ for {set i 0} {$i < 1024} {incr i} {
+ set key __avoid_collisions__[randstring 0 8 alpha]
+ set val __avoid_collisions__[randstring 0 8 alpha]
+ if {[info exists bighash($key)]} {
+ incr i -1
+ continue
+ }
+ r hset bighash $key $val
+ set bighash($key) $val
+ }
+ list [r hlen bighash]
+ } {1024}
+
+    test {Is the big hash encoded with a hash table?} {
+ assert_encoding hashtable bighash
+ }
+
+ test {HGET against the small hash} {
+ set err {}
+ foreach k [array names smallhash *] {
+ if {$smallhash($k) ne [r hget smallhash $k]} {
+ set err "$smallhash($k) != [r hget smallhash $k]"
+ break
+ }
+ }
+ set _ $err
+ } {}
+
+ test {HGET against the big hash} {
+ set err {}
+ foreach k [array names bighash *] {
+ if {$bighash($k) ne [r hget bighash $k]} {
+ set err "$bighash($k) != [r hget bighash $k]"
+ break
+ }
+ }
+ set _ $err
+ } {}
+
+ test {HGET against non existing key} {
+ set rv {}
+ lappend rv [r hget smallhash __123123123__]
+ lappend rv [r hget bighash __123123123__]
+ set _ $rv
+ } {{} {}}
+
+ test {HSET in update and insert mode} {
+ set rv {}
+ set k [lindex [array names smallhash *] 0]
+ lappend rv [r hset smallhash $k newval1]
+ set smallhash($k) newval1
+ lappend rv [r hget smallhash $k]
+ lappend rv [r hset smallhash __foobar123__ newval]
+ set k [lindex [array names bighash *] 0]
+ lappend rv [r hset bighash $k newval2]
+ set bighash($k) newval2
+ lappend rv [r hget bighash $k]
+ lappend rv [r hset bighash __foobar123__ newval]
+ lappend rv [r hdel smallhash __foobar123__]
+ lappend rv [r hdel bighash __foobar123__]
+ set _ $rv
+ } {0 newval1 1 0 newval2 1 1 1}
+
+ test {HSETNX target key missing - small hash} {
+ r hsetnx smallhash __123123123__ foo
+ r hget smallhash __123123123__
+ } {foo}
+
+ test {HSETNX target key exists - small hash} {
+ r hsetnx smallhash __123123123__ bar
+ set result [r hget smallhash __123123123__]
+ r hdel smallhash __123123123__
+ set _ $result
+ } {foo}
+
+ test {HSETNX target key missing - big hash} {
+ r hsetnx bighash __123123123__ foo
+ r hget bighash __123123123__
+ } {foo}
+
+ test {HSETNX target key exists - big hash} {
+ r hsetnx bighash __123123123__ bar
+ set result [r hget bighash __123123123__]
+ r hdel bighash __123123123__
+ set _ $result
+ } {foo}
+
+ test {HSET/HMSET wrong number of args} {
+ assert_error {*wrong number of arguments for 'hset' command} {r hset smallhash key1 val1 key2}
+ assert_error {*wrong number of arguments for 'hmset' command} {r hmset smallhash key1 val1 key2}
+ }
+
+ test {HMSET - small hash} {
+ set args {}
+ foreach {k v} [array get smallhash] {
+ set newval [randstring 0 8 alpha]
+ set smallhash($k) $newval
+ lappend args $k $newval
+ }
+ r hmset smallhash {*}$args
+ } {OK}
+
+ test {HMSET - big hash} {
+ set args {}
+ foreach {k v} [array get bighash] {
+ set newval [randstring 0 8 alpha]
+ set bighash($k) $newval
+ lappend args $k $newval
+ }
+ r hmset bighash {*}$args
+ } {OK}
+
+ test {HMGET against non existing key and fields} {
+ set rv {}
+ lappend rv [r hmget doesntexist __123123123__ __456456456__]
+ lappend rv [r hmget smallhash __123123123__ __456456456__]
+ lappend rv [r hmget bighash __123123123__ __456456456__]
+ set _ $rv
+ } {{{} {}} {{} {}} {{} {}}}
+
+ test {Hash commands against wrong type} {
+ r set wrongtype somevalue
+ assert_error "WRONGTYPE Operation against a key*" {r hmget wrongtype field1 field2}
+ assert_error "WRONGTYPE Operation against a key*" {r hrandfield wrongtype}
+ assert_error "WRONGTYPE Operation against a key*" {r hget wrongtype field1}
+ assert_error "WRONGTYPE Operation against a key*" {r hgetall wrongtype}
+ assert_error "WRONGTYPE Operation against a key*" {r hdel wrongtype field1}
+ assert_error "WRONGTYPE Operation against a key*" {r hincrby wrongtype field1 2}
+ assert_error "WRONGTYPE Operation against a key*" {r hincrbyfloat wrongtype field1 2.5}
+ assert_error "WRONGTYPE Operation against a key*" {r hstrlen wrongtype field1}
+ assert_error "WRONGTYPE Operation against a key*" {r hvals wrongtype}
+ assert_error "WRONGTYPE Operation against a key*" {r hkeys wrongtype}
+ assert_error "WRONGTYPE Operation against a key*" {r hexists wrongtype field1}
+ }
+
+ test {HMGET - small hash} {
+ set keys {}
+ set vals {}
+ foreach {k v} [array get smallhash] {
+ lappend keys $k
+ lappend vals $v
+ }
+ set err {}
+ set result [r hmget smallhash {*}$keys]
+        if {$vals ne $result} {
+            set err "$vals != $result"
+        }
+ set _ $err
+ } {}
+
+ test {HMGET - big hash} {
+ set keys {}
+ set vals {}
+ foreach {k v} [array get bighash] {
+ lappend keys $k
+ lappend vals $v
+ }
+ set err {}
+ set result [r hmget bighash {*}$keys]
+        if {$vals ne $result} {
+            set err "$vals != $result"
+        }
+ set _ $err
+ } {}
+
+ test {HKEYS - small hash} {
+ lsort [r hkeys smallhash]
+ } [lsort [array names smallhash *]]
+
+ test {HKEYS - big hash} {
+ lsort [r hkeys bighash]
+ } [lsort [array names bighash *]]
+
+ test {HVALS - small hash} {
+ set vals {}
+ foreach {k v} [array get smallhash] {
+ lappend vals $v
+ }
+ set _ [lsort $vals]
+ } [lsort [r hvals smallhash]]
+
+ test {HVALS - big hash} {
+ set vals {}
+ foreach {k v} [array get bighash] {
+ lappend vals $v
+ }
+ set _ [lsort $vals]
+ } [lsort [r hvals bighash]]
+
+ test {HGETALL - small hash} {
+ lsort [r hgetall smallhash]
+ } [lsort [array get smallhash]]
+
+ test {HGETALL - big hash} {
+ lsort [r hgetall bighash]
+ } [lsort [array get bighash]]
+
+ test {HDEL and return value} {
+ set rv {}
+ lappend rv [r hdel smallhash nokey]
+ lappend rv [r hdel bighash nokey]
+ set k [lindex [array names smallhash *] 0]
+ lappend rv [r hdel smallhash $k]
+ lappend rv [r hdel smallhash $k]
+ lappend rv [r hget smallhash $k]
+ unset smallhash($k)
+ set k [lindex [array names bighash *] 0]
+ lappend rv [r hdel bighash $k]
+ lappend rv [r hdel bighash $k]
+ lappend rv [r hget bighash $k]
+ unset bighash($k)
+ set _ $rv
+ } {0 0 1 0 {} 1 0 {}}
+
+ test {HDEL - more than a single value} {
+ set rv {}
+ r del myhash
+ r hmset myhash a 1 b 2 c 3
+ assert_equal 0 [r hdel myhash x y]
+ assert_equal 2 [r hdel myhash a c f]
+ r hgetall myhash
+ } {b 2}
+
+ test {HDEL - hash becomes empty before deleting all specified fields} {
+ r del myhash
+ r hmset myhash a 1 b 2 c 3
+ assert_equal 3 [r hdel myhash a b c d e]
+ assert_equal 0 [r exists myhash]
+ }
+
+ test {HEXISTS} {
+ set rv {}
+ set k [lindex [array names smallhash *] 0]
+ lappend rv [r hexists smallhash $k]
+ lappend rv [r hexists smallhash nokey]
+ set k [lindex [array names bighash *] 0]
+ lappend rv [r hexists bighash $k]
+ lappend rv [r hexists bighash nokey]
+ } {1 0 1 0}
+
+ test {Is a ziplist encoded Hash promoted on big payload?} {
+ r hset smallhash foo [string repeat a 1024]
+ r debug object smallhash
+ } {*hashtable*} {needs:debug}
+
+ test {HINCRBY against non existing database key} {
+ r del htest
+ list [r hincrby htest foo 2]
+ } {2}
+
+ test {HINCRBY against non existing hash key} {
+ set rv {}
+ r hdel smallhash tmp
+ r hdel bighash tmp
+ lappend rv [r hincrby smallhash tmp 2]
+ lappend rv [r hget smallhash tmp]
+ lappend rv [r hincrby bighash tmp 2]
+ lappend rv [r hget bighash tmp]
+ } {2 2 2 2}
+
+ test {HINCRBY against hash key created by hincrby itself} {
+ set rv {}
+ lappend rv [r hincrby smallhash tmp 3]
+ lappend rv [r hget smallhash tmp]
+ lappend rv [r hincrby bighash tmp 3]
+ lappend rv [r hget bighash tmp]
+ } {5 5 5 5}
+
+ test {HINCRBY against hash key originally set with HSET} {
+ r hset smallhash tmp 100
+ r hset bighash tmp 100
+ list [r hincrby smallhash tmp 2] [r hincrby bighash tmp 2]
+ } {102 102}
+
+ test {HINCRBY over 32bit value} {
+ r hset smallhash tmp 17179869184
+ r hset bighash tmp 17179869184
+ list [r hincrby smallhash tmp 1] [r hincrby bighash tmp 1]
+ } {17179869185 17179869185}
+
+ test {HINCRBY over 32bit value with over 32bit increment} {
+ r hset smallhash tmp 17179869184
+ r hset bighash tmp 17179869184
+ list [r hincrby smallhash tmp 17179869184] [r hincrby bighash tmp 17179869184]
+ } {34359738368 34359738368}
+
+ test {HINCRBY fails against hash value with spaces (left)} {
+ r hset smallhash str " 11"
+ r hset bighash str " 11"
+ catch {r hincrby smallhash str 1} smallerr
+ catch {r hincrby bighash str 1} bigerr
+ set rv {}
+ lappend rv [string match "ERR *not an integer*" $smallerr]
+ lappend rv [string match "ERR *not an integer*" $bigerr]
+ } {1 1}
+
+ test {HINCRBY fails against hash value with spaces (right)} {
+ r hset smallhash str "11 "
+ r hset bighash str "11 "
+ catch {r hincrby smallhash str 1} smallerr
+ catch {r hincrby bighash str 1} bigerr
+ set rv {}
+ lappend rv [string match "ERR *not an integer*" $smallerr]
+ lappend rv [string match "ERR *not an integer*" $bigerr]
+ } {1 1}
+
+ test {HINCRBY can detect overflows} {
+ set e {}
+ r hset hash n -9223372036854775484
+ assert {[r hincrby hash n -1] == -9223372036854775485}
+ catch {r hincrby hash n -10000} e
+ set e
+ } {*overflow*}
+
+ test {HINCRBYFLOAT against non existing database key} {
+ r del htest
+ list [r hincrbyfloat htest foo 2.5]
+ } {2.5}
+
+ test {HINCRBYFLOAT against non existing hash key} {
+ set rv {}
+ r hdel smallhash tmp
+ r hdel bighash tmp
+ lappend rv [roundFloat [r hincrbyfloat smallhash tmp 2.5]]
+ lappend rv [roundFloat [r hget smallhash tmp]]
+ lappend rv [roundFloat [r hincrbyfloat bighash tmp 2.5]]
+ lappend rv [roundFloat [r hget bighash tmp]]
+ } {2.5 2.5 2.5 2.5}
+
+    test {HINCRBYFLOAT against hash key created by hincrbyfloat itself} {
+ set rv {}
+ lappend rv [roundFloat [r hincrbyfloat smallhash tmp 3.5]]
+ lappend rv [roundFloat [r hget smallhash tmp]]
+ lappend rv [roundFloat [r hincrbyfloat bighash tmp 3.5]]
+ lappend rv [roundFloat [r hget bighash tmp]]
+ } {6 6 6 6}
+
+ test {HINCRBYFLOAT against hash key originally set with HSET} {
+ r hset smallhash tmp 100
+ r hset bighash tmp 100
+ list [roundFloat [r hincrbyfloat smallhash tmp 2.5]] \
+ [roundFloat [r hincrbyfloat bighash tmp 2.5]]
+ } {102.5 102.5}
+
+ test {HINCRBYFLOAT over 32bit value} {
+ r hset smallhash tmp 17179869184
+ r hset bighash tmp 17179869184
+ list [r hincrbyfloat smallhash tmp 1] \
+ [r hincrbyfloat bighash tmp 1]
+ } {17179869185 17179869185}
+
+ test {HINCRBYFLOAT over 32bit value with over 32bit increment} {
+ r hset smallhash tmp 17179869184
+ r hset bighash tmp 17179869184
+ list [r hincrbyfloat smallhash tmp 17179869184] \
+ [r hincrbyfloat bighash tmp 17179869184]
+ } {34359738368 34359738368}
+
+ test {HINCRBYFLOAT fails against hash value with spaces (left)} {
+ r hset smallhash str " 11"
+ r hset bighash str " 11"
+ catch {r hincrbyfloat smallhash str 1} smallerr
+ catch {r hincrbyfloat bighash str 1} bigerr
+ set rv {}
+ lappend rv [string match "ERR *not*float*" $smallerr]
+ lappend rv [string match "ERR *not*float*" $bigerr]
+ } {1 1}
+
+ test {HINCRBYFLOAT fails against hash value with spaces (right)} {
+ r hset smallhash str "11 "
+ r hset bighash str "11 "
+ catch {r hincrbyfloat smallhash str 1} smallerr
+ catch {r hincrbyfloat bighash str 1} bigerr
+ set rv {}
+ lappend rv [string match "ERR *not*float*" $smallerr]
+ lappend rv [string match "ERR *not*float*" $bigerr]
+ } {1 1}
+
+ test {HINCRBYFLOAT fails against hash value that contains a null-terminator in the middle} {
+ r hset h f "1\x002"
+ catch {r hincrbyfloat h f 1} err
+ set rv {}
+ lappend rv [string match "ERR *not*float*" $err]
+ } {1}
+
+ test {HSTRLEN against the small hash} {
+ set err {}
+ foreach k [array names smallhash *] {
+ if {[string length $smallhash($k)] ne [r hstrlen smallhash $k]} {
+ set err "[string length $smallhash($k)] != [r hstrlen smallhash $k]"
+ break
+ }
+ }
+ set _ $err
+ } {}
+
+ test {HSTRLEN against the big hash} {
+ set err {}
+ foreach k [array names bighash *] {
+ if {[string length $bighash($k)] ne [r hstrlen bighash $k]} {
+ set err "[string length $bighash($k)] != [r hstrlen bighash $k]"
+ puts "HSTRLEN and logical length mismatch:"
+ puts "key: $k"
+ puts "Logical content: $bighash($k)"
+ puts "Server content: [r hget bighash $k]"
+ }
+ }
+ set _ $err
+ } {}
+
+ test {HSTRLEN against non existing field} {
+ set rv {}
+ lappend rv [r hstrlen smallhash __123123123__]
+ lappend rv [r hstrlen bighash __123123123__]
+ set _ $rv
+ } {0 0}
+
+ test {HSTRLEN corner cases} {
+ set vals {
+ -9223372036854775808 9223372036854775807 9223372036854775808
+ {} 0 -1 x
+ }
+ foreach v $vals {
+ r hmset smallhash field $v
+ r hmset bighash field $v
+ set len1 [string length $v]
+ set len2 [r hstrlen smallhash field]
+ set len3 [r hstrlen bighash field]
+ assert {$len1 == $len2}
+ assert {$len2 == $len3}
+ }
+ }
+
+ test {HINCRBYFLOAT over hash-max-listpack-value encoded with a listpack} {
+ set original_max_value [lindex [r config get hash-max-ziplist-value] 1]
+ r config set hash-max-listpack-value 8
+
+ # hash's value exceeds hash-max-listpack-value
+ r del smallhash
+ r del bighash
+ r hset smallhash tmp 0
+ r hset bighash tmp 0
+ r hincrbyfloat smallhash tmp 0.000005
+ r hincrbyfloat bighash tmp 0.0000005
+ assert_encoding listpack smallhash
+ assert_encoding hashtable bighash
+
+ # hash's field exceeds hash-max-listpack-value
+ r del smallhash
+ r del bighash
+ r hincrbyfloat smallhash abcdefgh 1
+ r hincrbyfloat bighash abcdefghi 1
+ assert_encoding listpack smallhash
+ assert_encoding hashtable bighash
+
+ r config set hash-max-listpack-value $original_max_value
+ }
+
+ test {Hash ziplist regression test for large keys} {
+ r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk a
+ r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b
+ r hget hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk
+ } {b}
+
+ foreach size {10 512} {
+ test "Hash fuzzing #1 - $size fields" {
+ for {set times 0} {$times < 10} {incr times} {
+ catch {unset hash}
+ array set hash {}
+ r del hash
+
+ # Create
+ for {set j 0} {$j < $size} {incr j} {
+ set field [randomValue]
+ set value [randomValue]
+ r hset hash $field $value
+ set hash($field) $value
+ }
+
+ # Verify
+ foreach {k v} [array get hash] {
+ assert_equal $v [r hget hash $k]
+ }
+ assert_equal [array size hash] [r hlen hash]
+ }
+ }
+
+ test "Hash fuzzing #2 - $size fields" {
+ for {set times 0} {$times < 10} {incr times} {
+ catch {unset hash}
+ array set hash {}
+ r del hash
+
+ # Create
+ for {set j 0} {$j < $size} {incr j} {
+ randpath {
+ set field [randomValue]
+ set value [randomValue]
+ r hset hash $field $value
+ set hash($field) $value
+ } {
+ set field [randomSignedInt 512]
+ set value [randomSignedInt 512]
+ r hset hash $field $value
+ set hash($field) $value
+ } {
+ randpath {
+ set field [randomValue]
+ } {
+ set field [randomSignedInt 512]
+ }
+ r hdel hash $field
+ unset -nocomplain hash($field)
+ }
+ }
+
+ # Verify
+ foreach {k v} [array get hash] {
+ assert_equal $v [r hget hash $k]
+ }
+ assert_equal [array size hash] [r hlen hash]
+ }
+ }
+ }
+
+ test {Stress test the hash ziplist -> hashtable encoding conversion} {
+ r config set hash-max-ziplist-entries 32
+ for {set j 0} {$j < 100} {incr j} {
+ r del myhash
+ for {set i 0} {$i < 64} {incr i} {
+ r hset myhash [randomValue] [randomValue]
+ }
+ assert_encoding hashtable myhash
+ }
+ }
+
+ # The following test can only be executed if we don't use Valgrind, and if
+ # we are using x86_64 architecture, because:
+ #
+ # 1) Valgrind has floating point limitations, no support for 80 bits math.
+ # 2) Other archs may have the same limits.
+ #
+    # 1.23 cannot be represented exactly with 64-bit doubles, so we skip
+    # the test, since we are only testing pretty printing here and it is
+    # not a bug if the program outputs things like 1.2299999...
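+    # For example, the nearest 64-bit double to 1.23 is approximately
+    # 1.2299999999999999822, while the 80-bit long double math available on
+    # x86_64 is precise enough for the value to print back as "1.23".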
+ if {!$::valgrind && [string match *x86_64* [exec uname -a]]} {
+ test {Test HINCRBYFLOAT for correct float representation (issue #2846)} {
+ r del myhash
+ assert {[r hincrbyfloat myhash float 1.23] eq {1.23}}
+ assert {[r hincrbyfloat myhash float 0.77] eq {2}}
+ assert {[r hincrbyfloat myhash float -0.1] eq {1.9}}
+ }
+ }
+
+ test {Hash ziplist of various encodings} {
+ r del k
+ config_set hash-max-ziplist-entries 1000000000
+ config_set hash-max-ziplist-value 1000000000
+ r hset k ZIP_INT_8B 127
+ r hset k ZIP_INT_16B 32767
+ r hset k ZIP_INT_32B 2147483647
+ r hset k ZIP_INT_64B 9223372036854775808
+ r hset k ZIP_INT_IMM_MIN 0
+ r hset k ZIP_INT_IMM_MAX 12
+ r hset k ZIP_STR_06B [string repeat x 31]
+ r hset k ZIP_STR_14B [string repeat x 8191]
+ r hset k ZIP_STR_32B [string repeat x 65535]
+ set k [r hgetall k]
+ set dump [r dump k]
+
+ # will be converted to dict at RESTORE
+ config_set hash-max-ziplist-entries 2
+ config_set sanitize-dump-payload no mayfail
+ r restore kk 0 $dump
+ set kk [r hgetall kk]
+
+ # make sure the values are right
+ assert_equal [lsort $k] [lsort $kk]
+ assert_equal [dict get $k ZIP_STR_06B] [string repeat x 31]
+ set k [dict remove $k ZIP_STR_06B]
+ assert_equal [dict get $k ZIP_STR_14B] [string repeat x 8191]
+ set k [dict remove $k ZIP_STR_14B]
+ assert_equal [dict get $k ZIP_STR_32B] [string repeat x 65535]
+ set k [dict remove $k ZIP_STR_32B]
+ set _ $k
+ } {ZIP_INT_8B 127 ZIP_INT_16B 32767 ZIP_INT_32B 2147483647 ZIP_INT_64B 9223372036854775808 ZIP_INT_IMM_MIN 0 ZIP_INT_IMM_MAX 12}
+
+ test {Hash ziplist of various encodings - sanitize dump} {
+ config_set sanitize-dump-payload yes mayfail
+ r restore kk 0 $dump replace
+ set k [r hgetall k]
+ set kk [r hgetall kk]
+
+ # make sure the values are right
+ assert_equal [lsort $k] [lsort $kk]
+ assert_equal [dict get $k ZIP_STR_06B] [string repeat x 31]
+ set k [dict remove $k ZIP_STR_06B]
+ assert_equal [dict get $k ZIP_STR_14B] [string repeat x 8191]
+ set k [dict remove $k ZIP_STR_14B]
+ assert_equal [dict get $k ZIP_STR_32B] [string repeat x 65535]
+ set k [dict remove $k ZIP_STR_32B]
+ set _ $k
+ } {ZIP_INT_8B 127 ZIP_INT_16B 32767 ZIP_INT_32B 2147483647 ZIP_INT_64B 9223372036854775808 ZIP_INT_IMM_MIN 0 ZIP_INT_IMM_MAX 12}
+
+ # On some platforms strtold("+inf") with valgrind returns a non-inf result
+ if {!$::valgrind} {
+ test {HINCRBYFLOAT does not allow NaN or Infinity} {
+ assert_error "*value is NaN or Infinity*" {r hincrbyfloat hfoo field +inf}
+ assert_equal 0 [r exists hfoo]
+ }
+ }
+}
diff --git a/tests/unit/type/incr.tcl b/tests/unit/type/incr.tcl
new file mode 100644
index 0000000..2319b2c
--- /dev/null
+++ b/tests/unit/type/incr.tcl
@@ -0,0 +1,214 @@
+start_server {tags {"incr"}} {
+ test {INCR against non existing key} {
+ set res {}
+ append res [r incr novar]
+ append res [r get novar]
+ } {11}
+
+ test {INCR against key created by incr itself} {
+ r incr novar
+ } {2}
+
+ test {DECR against key created by incr} {
+ r decr novar
+ } {1}
+
+    test {DECR against non existing key, then INCR} {
+ r del novar_not_exist
+ assert_equal {-1} [r decr novar_not_exist]
+ assert_equal {0} [r incr novar_not_exist]
+ }
+
+ test {INCR against key originally set with SET} {
+ r set novar 100
+ r incr novar
+ } {101}
+
+ test {INCR over 32bit value} {
+ r set novar 17179869184
+ r incr novar
+ } {17179869185}
+
+ test {INCRBY over 32bit value with over 32bit increment} {
+ r set novar 17179869184
+ r incrby novar 17179869184
+ } {34359738368}
+
+ test {INCR fails against key with spaces (left)} {
+ r set novar " 11"
+ catch {r incr novar} err
+ format $err
+ } {ERR*}
+
+ test {INCR fails against key with spaces (right)} {
+ r set novar "11 "
+ catch {r incr novar} err
+ format $err
+ } {ERR*}
+
+ test {INCR fails against key with spaces (both)} {
+ r set novar " 11 "
+ catch {r incr novar} err
+ format $err
+ } {ERR*}
+
+ test {DECRBY negation overflow} {
+ r set x 0
+ catch {r decrby x -9223372036854775808} err
+ format $err
+ } {ERR*}
+
+ test {INCR fails against a key holding a list} {
+ r rpush mylist 1
+ catch {r incr mylist} err
+ r rpop mylist
+ format $err
+ } {WRONGTYPE*}
+
+ test {DECRBY over 32bit value with over 32bit increment, negative res} {
+ r set novar 17179869184
+ r decrby novar 17179869185
+ } {-1}
+
+    test {DECRBY against non existing key} {
+ r del key_not_exist
+ assert_equal {-1} [r decrby key_not_exist 1]
+ }
+
+ test {INCR uses shared objects in the 0-9999 range} {
+ r set foo -1
+ r incr foo
+ assert_refcount_morethan foo 1
+ r set foo 9998
+ r incr foo
+ assert_refcount_morethan foo 1
+ r incr foo
+ assert_refcount 1 foo
+ }
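+
+    # (Background sketch: the server keeps a pool of shared integer objects
+    # for small values -- 0..9999 at the time of writing -- so keys holding
+    # those values report a refcount greater than 1, while larger values get
+    # a private object with refcount 1, as the test above relies on.)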
+
+ test {INCR can modify objects in-place} {
+ r set foo 20000
+ r incr foo
+ assert_refcount 1 foo
+ set old [lindex [split [r debug object foo]] 1]
+ r incr foo
+ set new [lindex [split [r debug object foo]] 1]
+ assert {[string range $old 0 2] eq "at:"}
+ assert {[string range $new 0 2] eq "at:"}
+ assert {$old eq $new}
+ } {} {needs:debug}
+
+ test {INCRBYFLOAT against non existing key} {
+ r del novar
+ list [roundFloat [r incrbyfloat novar 1]] \
+ [roundFloat [r get novar]] \
+ [roundFloat [r incrbyfloat novar 0.25]] \
+ [roundFloat [r get novar]]
+ } {1 1 1.25 1.25}
+
+ test {INCRBYFLOAT against key originally set with SET} {
+ r set novar 1.5
+ roundFloat [r incrbyfloat novar 1.5]
+ } {3}
+
+ test {INCRBYFLOAT over 32bit value} {
+ r set novar 17179869184
+ r incrbyfloat novar 1.5
+ } {17179869185.5}
+
+ test {INCRBYFLOAT over 32bit value with over 32bit increment} {
+ r set novar 17179869184
+ r incrbyfloat novar 17179869184
+ } {34359738368}
+
+ test {INCRBYFLOAT fails against key with spaces (left)} {
+ set err {}
+ r set novar " 11"
+ catch {r incrbyfloat novar 1.0} err
+ format $err
+ } {ERR *valid*}
+
+ test {INCRBYFLOAT fails against key with spaces (right)} {
+ set err {}
+ r set novar "11 "
+ catch {r incrbyfloat novar 1.0} err
+ format $err
+ } {ERR *valid*}
+
+ test {INCRBYFLOAT fails against key with spaces (both)} {
+ set err {}
+ r set novar " 11 "
+ catch {r incrbyfloat novar 1.0} err
+ format $err
+ } {ERR *valid*}
+
+ test {INCRBYFLOAT fails against a key holding a list} {
+ r del mylist
+ set err {}
+ r rpush mylist 1
+ catch {r incrbyfloat mylist 1.0} err
+ r del mylist
+ format $err
+ } {WRONGTYPE*}
+
+ # On some platforms strtold("+inf") with valgrind returns a non-inf result
+ if {!$::valgrind} {
+ test {INCRBYFLOAT does not allow NaN or Infinity} {
+ r set foo 0
+ set err {}
+ catch {r incrbyfloat foo +inf} err
+ set err
+            # P.S. there is no way to force a NaN from the API, since there
+            # is no way to increment / decrement by infinity nor to
+            # perform divisions.
+ } {ERR *would produce*}
+ }
+
+ test {INCRBYFLOAT decrement} {
+ r set foo 1
+ roundFloat [r incrbyfloat foo -1.1]
+ } {-0.1}
+
+ test {string to double with null terminator} {
+ r set foo 1
+ r setrange foo 2 2
+ catch {r incrbyfloat foo 1} err
+ format $err
+ } {ERR *valid*}
+
+ test {No negative zero} {
+ r del foo
+ r incrbyfloat foo [expr double(1)/41]
+ r incrbyfloat foo [expr double(-1)/41]
+ r get foo
+ } {0}
+
+ foreach cmd {"incr" "decr" "incrby" "decrby"} {
+ test "$cmd operation should update encoding from raw to int" {
+ set res {}
+ set expected {1 12}
+ if {[string match {*incr*} $cmd]} {
+ lappend expected 13
+ } else {
+ lappend expected 11
+ }
+
+ r set foo 1
+ assert_encoding "int" foo
+ lappend res [r get foo]
+
+ r append foo 2
+ assert_encoding "raw" foo
+ lappend res [r get foo]
+
+ if {[string match {*by*} $cmd]} {
+ r $cmd foo 1
+ } else {
+ r $cmd foo
+ }
+ assert_encoding "int" foo
+ lappend res [r get foo]
+ assert_equal $res $expected
+ }
+ }
+}
diff --git a/tests/unit/type/list-2.tcl b/tests/unit/type/list-2.tcl
new file mode 100644
index 0000000..5874a90
--- /dev/null
+++ b/tests/unit/type/list-2.tcl
@@ -0,0 +1,47 @@
+start_server {
+ tags {"list"}
+ overrides {
+ "list-max-ziplist-size" 4
+ }
+} {
+ source "tests/unit/type/list-common.tcl"
+
+ foreach {type large} [array get largevalue] {
+ tags {"slow"} {
+ test "LTRIM stress testing - $type" {
+ set mylist {}
+ set startlen 32
+ r del mylist
+
+ # Start with the large value to ensure the
+ # right encoding is used.
+ r rpush mylist $large
+ lappend mylist $large
+
+ for {set i 0} {$i < $startlen} {incr i} {
+ set str [randomInt 9223372036854775807]
+ r rpush mylist $str
+ lappend mylist $str
+ }
+
+ for {set i 0} {$i < 1000} {incr i} {
+ set min [expr {int(rand()*$startlen)}]
+ set max [expr {$min+int(rand()*$startlen)}]
+ set before_len [llength $mylist]
+ set before_len_r [r llen mylist]
+ assert_equal $before_len $before_len_r
+ set mylist [lrange $mylist $min $max]
+ r ltrim mylist $min $max
+ assert_equal $mylist [r lrange mylist 0 -1] "failed trim"
+
+ for {set j [r llen mylist]} {$j < $startlen} {incr j} {
+ set str [randomInt 9223372036854775807]
+ r rpush mylist $str
+ lappend mylist $str
+ assert_equal $mylist [r lrange mylist 0 -1] "failed append match"
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/type/list-3.tcl b/tests/unit/type/list-3.tcl
new file mode 100644
index 0000000..45df593
--- /dev/null
+++ b/tests/unit/type/list-3.tcl
@@ -0,0 +1,232 @@
+proc generate_cmd_on_list_key {key} {
+ set op [randomInt 7]
+ set small_signed_count [expr 5-[randomInt 10]]
+ if {[randomInt 2] == 0} {
+ set ele [randomInt 1000]
+ } else {
+ set ele [string repeat x [randomInt 10000]][randomInt 1000]
+ }
+ switch $op {
+ 0 {return "lpush $key $ele"}
+ 1 {return "rpush $key $ele"}
+ 2 {return "lpop $key"}
+ 3 {return "rpop $key"}
+ 4 {
+ return "lset $key $small_signed_count $ele"
+ }
+ 5 {
+ set otherele [randomInt 1000]
+ if {[randomInt 2] == 0} {
+ set where before
+ } else {
+ set where after
+ }
+ return "linsert $key $where $otherele $ele"
+ }
+ 6 {
+ set otherele ""
+ catch {
+ set index [randomInt [r llen $key]]
+ set otherele [r lindex $key $index]
+ }
+ return "lrem $key 1 $otherele"
+ }
+ }
+}
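+
+# Typical (hypothetical) outputs of the generator above: "lpush key 42",
+# "rpop key", "linsert key before 817 xx...x99". Some generated commands are
+# expected to fail (e.g. LSET on an out-of-range index); the stress loop
+# below runs them inside catch and only checks that the server survives.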
+
+start_server {
+ tags {"list ziplist"}
+ overrides {
+ "list-max-ziplist-size" 16
+ }
+} {
+ test {Explicit regression for a list bug} {
+ set mylist {49376042582 {BkG2o\pIC]4YYJa9cJ4GWZalG[4tin;1D2whSkCOW`mX;SFXGyS8sedcff3fQI^tgPCC@^Nu1J6o]meM@Lko]t_jRyo<xSJ1oObDYd`ppZuW6P@fS278YaOx=s6lvdFlMbP0[SbkI^Kr\HBXtuFaA^mDx:yzS4a[skiiPWhT<nNfAf=aQVfclcuwDrfe;iVuKdNvB9kbfq>tK?tH[\EvWqS]b`o2OCtjg:?nUTwdjpcUm]y:pg5q24q7LlCOwQE^}}
+ r del l
+ r rpush l [lindex $mylist 0]
+ r rpush l [lindex $mylist 1]
+ assert_equal [r lindex l 0] [lindex $mylist 0]
+ assert_equal [r lindex l 1] [lindex $mylist 1]
+ }
+
+ test {Regression for quicklist #3343 bug} {
+ r del mylist
+ r lpush mylist 401
+ r lpush mylist 392
+ r rpush mylist [string repeat x 5105]"799"
+ r lset mylist -1 [string repeat x 1014]"702"
+ r lpop mylist
+ r lset mylist -1 [string repeat x 4149]"852"
+ r linsert mylist before 401 [string repeat x 9927]"12"
+ r lrange mylist 0 -1
+ r ping ; # It's enough if the server is still alive
+ } {PONG}
+
+ test {Check compression with recompress} {
+ r del key
+ config_set list-compress-depth 1
+ config_set list-max-ziplist-size 16
+ r rpush key a
+ r rpush key [string repeat b 50000]
+ r rpush key c
+ r lset key 1 d
+ r rpop key
+ r rpush key [string repeat e 5000]
+ r linsert key before f 1
+ r rpush key g
+ r ping
+ }
+
+    test {Crash due to wrongly recompressing after lrem} {
+ r del key
+ config_set list-compress-depth 2
+ r lpush key a
+ r lpush key [string repeat a 5000]
+ r lpush key [string repeat b 5000]
+ r lpush key [string repeat c 5000]
+ r rpush key [string repeat x 10000]"969"
+ r rpush key b
+ r lrem key 1 a
+ r rpop key
+ r lrem key 1 [string repeat x 10000]"969"
+ r rpush key crash
+ r ping
+ }
+
+    test {LINSERT correctly recompresses a full quicklistNode after inserting an element before it} {
+ r del key
+ config_set list-compress-depth 1
+ r rpush key b
+ r rpush key c
+ r lset key -1 [string repeat x 8192]"969"
+ r lpush key a
+ r rpush key d
+ r linsert key before b f
+ r rpop key
+ r ping
+ }
+
+    test {LINSERT correctly recompresses a full quicklistNode after inserting an element after it} {
+ r del key
+ config_set list-compress-depth 1
+ r rpush key b
+ r rpush key c
+ r lset key 0 [string repeat x 8192]"969"
+ r lpush key a
+ r rpush key d
+ r linsert key after c f
+ r lpop key
+ r ping
+ }
+
+foreach comp {2 1 0} {
+ set cycles 1000
+ if {$::accurate} { set cycles 10000 }
+ config_set list-compress-depth $comp
+
+ test "Stress tester for #3343-alike bugs comp: $comp" {
+ r del key
+ set sent {}
+ for {set j 0} {$j < $cycles} {incr j} {
+ catch {
+ set cmd [generate_cmd_on_list_key key]
+ lappend sent $cmd
+
+                # Execute the command; we expect some commands to fail with syntax errors
+ r {*}$cmd
+ }
+ }
+
+ set print_commands false
+ set crash false
+ if {[catch {r ping}]} {
+ puts "Server crashed"
+ set print_commands true
+ set crash true
+ }
+
+ if {!$::external} {
+ # Check the valgrind and ASan reports for invalid reads right after
+ # executing the commands, so that we get a report that is easier to reproduce.
+ set valgrind_errors [find_valgrind_errors [srv 0 stderr] false]
+ set asan_errors [sanitizer_errors_from_file [srv 0 stderr]]
+ if {$valgrind_errors != "" || $asan_errors != ""} {
+ puts "valgrind or asan found an issue"
+ set print_commands true
+ }
+ }
+
+ if {$print_commands} {
+ puts "violating commands:"
+ foreach cmd $sent {
+ puts $cmd
+ }
+ }
+
+ assert_equal $crash false
+ }
+} ;# foreach comp
+
+ tags {slow} {
+ test {ziplist implementation: value encoding and backlink} {
+ if {$::accurate} {set iterations 100} else {set iterations 10}
+ for {set j 0} {$j < $iterations} {incr j} {
+ r del l
+ set l {}
+ for {set i 0} {$i < 200} {incr i} {
+ randpath {
+ set data [string repeat x [randomInt 100000]]
+ } {
+ set data [randomInt 65536]
+ } {
+ set data [randomInt 4294967296]
+ } {
+ set data [randomInt 18446744073709551616]
+ } {
+ set data -[randomInt 65536]
+ if {$data eq {-0}} {set data 0}
+ } {
+ set data -[randomInt 4294967296]
+ if {$data eq {-0}} {set data 0}
+ } {
+ set data -[randomInt 18446744073709551616]
+ if {$data eq {-0}} {set data 0}
+ }
+ lappend l $data
+ r rpush l $data
+ }
+ assert_equal [llength $l] [r llen l]
+ # Traverse backward
+ for {set i 199} {$i >= 0} {incr i -1} {
+ if {[lindex $l $i] ne [r lindex l $i]} {
+ assert_equal [lindex $l $i] [r lindex l $i]
+ }
+ }
+ }
+ }
+
+ test {ziplist implementation: encoding stress testing} {
+ for {set j 0} {$j < 200} {incr j} {
+ r del l
+ set l {}
+ set len [randomInt 400]
+ for {set i 0} {$i < $len} {incr i} {
+ set rv [randomValue]
+ randpath {
+ lappend l $rv
+ r rpush l $rv
+ } {
+ set l [concat [list $rv] $l]
+ r lpush l $rv
+ }
+ }
+ assert_equal [llength $l] [r llen l]
+ for {set i 0} {$i < $len} {incr i} {
+ if {[lindex $l $i] ne [r lindex l $i]} {
+ assert_equal [lindex $l $i] [r lindex l $i]
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/tests/unit/type/list-common.tcl b/tests/unit/type/list-common.tcl
new file mode 100644
index 0000000..b393737
--- /dev/null
+++ b/tests/unit/type/list-common.tcl
@@ -0,0 +1,4 @@
+# One value per encoding, to make sure the list ends up with the intended encoding once the value is inserted.
+array set largevalue {}
+set largevalue(listpack) "hello"
+set largevalue(quicklist) [string repeat "x" 8192]
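+
+# Usage sketch (the pattern the list tests that source this file rely on):
+#   foreach {type large} [array get largevalue] {
+#       r del mylist
+#       r rpush mylist $large        ;# the pushed value determines the encoding
+#       assert_encoding $type mylist
+#   }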
diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl
new file mode 100644
index 0000000..993b6d1
--- /dev/null
+++ b/tests/unit/type/list.tcl
@@ -0,0 +1,2363 @@
+# Check the compression functionality of plain and zipped nodes.
+start_server [list overrides [list save ""] ] {
+ r config set list-compress-depth 2
+ r config set list-max-ziplist-size 1
+
+ # 3 tests to check compression with regular ziplist nodes:
+ # 1. using push + insert
+ # 2. using push + insert + trim
+ # 3. using push + insert + set
+
+ test {reg node check compression with insert and pop} {
+ r lpush list1 [string repeat a 500]
+ r lpush list1 [string repeat b 500]
+ r lpush list1 [string repeat c 500]
+ r lpush list1 [string repeat d 500]
+ r linsert list1 after [string repeat d 500] [string repeat e 500]
+ r linsert list1 after [string repeat d 500] [string repeat f 500]
+ r linsert list1 after [string repeat d 500] [string repeat g 500]
+ r linsert list1 after [string repeat d 500] [string repeat j 500]
+ assert_equal [r lpop list1] [string repeat d 500]
+ assert_equal [r lpop list1] [string repeat j 500]
+ assert_equal [r lpop list1] [string repeat g 500]
+ assert_equal [r lpop list1] [string repeat f 500]
+ assert_equal [r lpop list1] [string repeat e 500]
+ assert_equal [r lpop list1] [string repeat c 500]
+ assert_equal [r lpop list1] [string repeat b 500]
+ assert_equal [r lpop list1] [string repeat a 500]
+ }
+
+ test {reg node check compression combined with trim} {
+ r lpush list2 [string repeat a 500]
+ r linsert list2 after [string repeat a 500] [string repeat b 500]
+ r rpush list2 [string repeat c 500]
+ assert_equal [string repeat b 500] [r lindex list2 1]
+ r LTRIM list2 1 -1
+ r llen list2
+ } {2}
+
+ test {reg node check compression with lset} {
+ r lpush list3 [string repeat a 500]
+ r LSET list3 0 [string repeat b 500]
+ assert_equal [string repeat b 500] [r lindex list3 0]
+ r lpush list3 [string repeat c 500]
+ r LSET list3 0 [string repeat d 500]
+ assert_equal [string repeat d 500] [r lindex list3 0]
+ }
+
+ # repeating the 3 tests with plain nodes
+ # (by adjusting quicklist-packed-threshold)
+
+ test {plain node check compression} {
+ r debug quicklist-packed-threshold 1b
+ r lpush list4 [string repeat a 500]
+ r lpush list4 [string repeat b 500]
+ r lpush list4 [string repeat c 500]
+ r lpush list4 [string repeat d 500]
+ r linsert list4 after [string repeat d 500] [string repeat e 500]
+ r linsert list4 after [string repeat d 500] [string repeat f 500]
+ r linsert list4 after [string repeat d 500] [string repeat g 500]
+ r linsert list4 after [string repeat d 500] [string repeat j 500]
+ assert_equal [r lpop list4] [string repeat d 500]
+ assert_equal [r lpop list4] [string repeat j 500]
+ assert_equal [r lpop list4] [string repeat g 500]
+ assert_equal [r lpop list4] [string repeat f 500]
+ assert_equal [r lpop list4] [string repeat e 500]
+ assert_equal [r lpop list4] [string repeat c 500]
+ assert_equal [r lpop list4] [string repeat b 500]
+ assert_equal [r lpop list4] [string repeat a 500]
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ test {plain node check compression with ltrim} {
+ r debug quicklist-packed-threshold 1b
+ r lpush list5 [string repeat a 500]
+ r linsert list5 after [string repeat a 500] [string repeat b 500]
+ r rpush list5 [string repeat c 500]
+ assert_equal [string repeat b 500] [r lindex list5 1]
+ r LTRIM list5 1 -1
+ assert_equal [r llen list5] 2
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ test {plain node check compression using lset} {
+ r debug quicklist-packed-threshold 1b
+ r lpush list6 [string repeat a 500]
+ r LSET list6 0 [string repeat b 500]
+ assert_equal [string repeat b 500] [r lindex list6 0]
+ r lpush list6 [string repeat c 500]
+ r LSET list6 0 [string repeat d 500]
+ assert_equal [string repeat d 500] [r lindex list6 0]
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # revert config for external mode tests.
+ r config set list-compress-depth 0
+}
+
+# Check the functionality of plain nodes using a low packed-threshold.
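+# (DEBUG QUICKLIST-PACKED-THRESHOLD sets the size above which an element is
+# stored in its own plain node rather than packed into a listpack; "1b" makes
+# every element plain, and "0" restores the default threshold.)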
+start_server [list overrides [list save ""] ] {
+ # basic command check for plain nodes - "LPUSH & LPOP"
+ test {Test LPUSH and LPOP on plain nodes} {
+ r flushdb
+ r debug quicklist-packed-threshold 1b
+ r lpush lst 9
+ r lpush lst xxxxxxxxxx
+ r lpush lst xxxxxxxxxx
+ set s0 [s used_memory]
+ assert {$s0 > 10}
+ assert {[r llen lst] == 3}
+ set s0 [r rpop lst]
+ set s1 [r rpop lst]
+ assert {$s0 eq "9"}
+ assert {[r llen lst] == 1}
+ r lpop lst
+ assert {[string length $s1] == 10}
+ # check rdb
+ r lpush lst xxxxxxxxxx
+ r lpush lst bb
+ r debug reload
+ assert_equal [r rpop lst] "xxxxxxxxxx"
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # basic command check for plain nodes - "LINDEX & LINSERT"
+ test {Test LINDEX and LINSERT on plain nodes} {
+ r flushdb
+ r debug quicklist-packed-threshold 1b
+ r lpush lst xxxxxxxxxxx
+ r lpush lst 9
+ r lpush lst xxxxxxxxxxx
+ r linsert lst before "9" "8"
+ assert {[r lindex lst 1] eq "8"}
+ r linsert lst BEFORE "9" "7"
+ r linsert lst BEFORE "9" "xxxxxxxxxxx"
+ assert {[r lindex lst 3] eq "xxxxxxxxxxx"}
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # basic command check for plain nodes - "LTRIM"
+ test {Test LTRIM on plain nodes} {
+ r flushdb
+ r debug quicklist-packed-threshold 1b
+ r lpush lst1 9
+ r lpush lst1 xxxxxxxxxxx
+ r lpush lst1 9
+ r LTRIM lst1 1 -1
+ assert_equal [r llen lst1] 2
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # basic command check for plain nodes - "LREM"
+ test {Test LREM on plain nodes} {
+ r flushdb
+ r debug quicklist-packed-threshold 1b
+ r lpush lst one
+ r lpush lst xxxxxxxxxxx
+ set s0 [s used_memory]
+ assert {$s0 > 10}
+ r lpush lst 9
+ r LREM lst -2 "one"
+ assert_equal [r llen lst] 2
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # basic command check for plain nodes - "LPOS"
+ test {Test LPOS on plain nodes} {
+ r flushdb
+ r debug quicklist-packed-threshold 1b
+ r RPUSH lst "aa"
+ r RPUSH lst "bb"
+ r RPUSH lst "cc"
+ r LSET lst 0 "xxxxxxxxxxx"
+ assert_equal [r LPOS lst "xxxxxxxxxxx"] 0
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # basic command check for plain nodes - "LMOVE"
+ test {Test LMOVE on plain nodes} {
+ r flushdb
+ r debug quicklist-packed-threshold 1b
+ r RPUSH lst2{t} "aa"
+ r RPUSH lst2{t} "bb"
+ r LSET lst2{t} 0 xxxxxxxxxxx
+ r RPUSH lst2{t} "cc"
+ r RPUSH lst2{t} "dd"
+ r LMOVE lst2{t} lst{t} RIGHT LEFT
+ r LMOVE lst2{t} lst{t} LEFT RIGHT
+ assert_equal [r llen lst{t}] 2
+ assert_equal [r llen lst2{t}] 2
+ assert_equal [r lpop lst2{t}] "bb"
+ assert_equal [r lpop lst2{t}] "cc"
+ assert_equal [r lpop lst{t}] "dd"
+ assert_equal [r lpop lst{t}] "xxxxxxxxxxx"
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # Test LSET with combinations of node types:
+ # plain->packed, packed->plain, plain->plain, packed->packed
+ test {Test LSET with packed / plain combinations} {
+ r debug quicklist-packed-threshold 5b
+ r RPUSH lst "aa"
+ r RPUSH lst "bb"
+ r lset lst 0 [string repeat d 50001]
+ set s1 [r lpop lst]
+ assert_equal $s1 [string repeat d 50001]
+ r RPUSH lst [string repeat f 50001]
+ r lset lst 0 [string repeat e 50001]
+ set s1 [r lpop lst]
+ assert_equal $s1 [string repeat e 50001]
+ r RPUSH lst [string repeat m 50001]
+ r lset lst 0 "bb"
+ set s1 [r lpop lst]
+ assert_equal $s1 "bb"
+ r RPUSH lst "bb"
+ r lset lst 0 "cc"
+ set s1 [r lpop lst]
+ assert_equal $s1 "cc"
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+ # Check LSET in case the ziplist needs to be split.
+ test {Test LSET where a packed node is split in the middle} {
+ r flushdb
+ r debug quicklist-packed-threshold 5b
+ r RPUSH lst "aa"
+ r RPUSH lst "bb"
+ r RPUSH lst "cc"
+ r RPUSH lst "dd"
+ r RPUSH lst "ee"
+ r lset lst 2 [string repeat e 10]
+ assert_equal [r lpop lst] "aa"
+ assert_equal [r lpop lst] "bb"
+ assert_equal [r lpop lst] [string repeat e 10]
+ assert_equal [r lpop lst] "dd"
+ assert_equal [r lpop lst] "ee"
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+
+
+ # repeating "plain check LSET with combinations"
+ # but now with single item in each ziplist
+ test {Test LSET with packed consist only one item} {
+ r flushdb
+ set original_config [config_get_set list-max-ziplist-size 1]
+ r debug quicklist-packed-threshold 1b
+ r RPUSH lst "aa"
+ r RPUSH lst "bb"
+ r lset lst 0 [string repeat d 50001]
+ set s1 [r lpop lst]
+ assert_equal $s1 [string repeat d 50001]
+ r RPUSH lst [string repeat f 50001]
+ r lset lst 0 [string repeat e 50001]
+ set s1 [r lpop lst]
+ assert_equal $s1 [string repeat e 50001]
+ r RPUSH lst [string repeat m 50001]
+ r lset lst 0 "bb"
+ set s1 [r lpop lst]
+ assert_equal $s1 "bb"
+ r RPUSH lst "bb"
+ r lset lst 0 "cc"
+ set s1 [r lpop lst]
+ assert_equal $s1 "cc"
+ r debug quicklist-packed-threshold 0
+ r config set list-max-ziplist-size $original_config
+ } {OK} {needs:debug}
+
+ test {Crash due to deleting an entry from a compressed quicklist node} {
+ r flushdb
+ r debug quicklist-packed-threshold 100b
+ set original_config [config_get_set list-compress-depth 1]
+
+ set small_ele [string repeat x 32]
+ set large_ele [string repeat x 100]
+
+ # Push a large element
+ r RPUSH lst $large_ele
+
+ # Insert two elements and keep them in the same node
+ r RPUSH lst $small_ele
+ r RPUSH lst $small_ele
+
+ # When setting the position of -1 to a large element, we first insert
+ # a large element at the end and then delete its previous element.
+ r LSET lst -1 $large_ele
+ assert_equal "$large_ele $small_ele $large_ele" [r LRANGE lst 0 -1]
+
+ r debug quicklist-packed-threshold 0
+ r config set list-compress-depth $original_config
+ } {OK} {needs:debug}
+
+ test {Crash due to wrongly splitting a quicklist node} {
+ r flushdb
+ r debug quicklist-packed-threshold 10b
+
+ r LPUSH lst "aa"
+ r LPUSH lst "bb"
+ r LSET lst -2 [string repeat x 10]
+ r RPOP lst
+ assert_equal [string repeat x 10] [r LRANGE lst 0 -1]
+
+ r debug quicklist-packed-threshold 0
+ } {OK} {needs:debug}
+}
+
+run_solo {list-large-memory} {
+start_server [list overrides [list save ""] ] {
+
+# Test if the server supports such large configs (skips 32-bit builds).
+catch {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+}
+if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
+
+ set str_length 5000000000
+
+ # Repeat all the plain-node basic checks with 5GB values.
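+ # (Helper semantics assumed from their use below: write_big_bulk streams a
+ # huge bulk argument over the socket and read_big_bulk consumes a huge bulk
+ # reply, returning only its length, so the 5GB values never have to fit in
+ # the test client's memory.)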
+ test {Test LPUSH and LPOP on plain nodes over 4GB} {
+ r flushdb
+ r lpush lst 9
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n"
+ write_big_bulk $str_length;
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n"
+ write_big_bulk $str_length;
+ set s0 [s used_memory]
+ assert {$s0 > $str_length}
+ assert {[r llen lst] == 3}
+ assert_equal [r rpop lst] "9"
+ assert_equal [read_big_bulk {r rpop lst}] $str_length
+ assert {[r llen lst] == 1}
+ assert_equal [read_big_bulk {r rpop lst}] $str_length
+ } {} {large-memory}
+
+ test {Test LINDEX and LINSERT on plain nodes over 4GB} {
+ r flushdb
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n"
+ write_big_bulk $str_length;
+ r lpush lst 9
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n"
+ write_big_bulk $str_length;
+ r linsert lst before "9" "8"
+ assert_equal [r lindex lst 1] "8"
+ r LINSERT lst BEFORE "9" "7"
+ r write "*5\r\n\$7\r\nLINSERT\r\n\$3\r\nlst\r\n\$6\r\nBEFORE\r\n\$3\r\n\"9\"\r\n"
+ write_big_bulk 10;
+ assert_equal [read_big_bulk {r rpop lst}] $str_length
+ } {} {large-memory}
+
+ test {Test LTRIM on plain nodes over 4GB} {
+ r flushdb
+ r lpush lst 9
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n"
+ write_big_bulk $str_length;
+ r lpush lst 9
+ r LTRIM lst 1 -1
+ assert_equal [r llen lst] 2
+ assert_equal [r rpop lst] 9
+ assert_equal [read_big_bulk {r rpop lst}] $str_length
+ } {} {large-memory}
+
+ test {Test LREM on plain nodes over 4GB} {
+ r flushdb
+ r lpush lst one
+ r write "*3\r\n\$5\r\nLPUSH\r\n\$3\r\nlst\r\n"
+ write_big_bulk $str_length;
+ r lpush lst 9
+ r LREM lst -2 "one"
+ assert_equal [read_big_bulk {r rpop lst}] $str_length
+ r llen lst
+ } {1} {large-memory}
+
+ test {Test LSET on plain nodes over 4GB} {
+ r flushdb
+ r RPUSH lst "aa"
+ r RPUSH lst "bb"
+ r RPUSH lst "cc"
+ r write "*4\r\n\$4\r\nLSET\r\n\$3\r\nlst\r\n\$1\r\n0\r\n"
+ write_big_bulk $str_length;
+ assert_equal [r rpop lst] "cc"
+ assert_equal [r rpop lst] "bb"
+ assert_equal [read_big_bulk {r rpop lst}] $str_length
+ } {} {large-memory}
+
+ test {Test LMOVE on plain nodes over 4GB} {
+ r flushdb
+ r RPUSH lst2{t} "aa"
+ r RPUSH lst2{t} "bb"
+ r write "*4\r\n\$4\r\nLSET\r\n\$7\r\nlst2{t}\r\n\$1\r\n0\r\n"
+ write_big_bulk $str_length;
+ r RPUSH lst2{t} "cc"
+ r RPUSH lst2{t} "dd"
+ r LMOVE lst2{t} lst{t} RIGHT LEFT
+ assert_equal [read_big_bulk {r LMOVE lst2{t} lst{t} LEFT RIGHT}] $str_length
+ assert_equal [r llen lst{t}] 2
+ assert_equal [r llen lst2{t}] 2
+ assert_equal [r lpop lst2{t}] "bb"
+ assert_equal [r lpop lst2{t}] "cc"
+ assert_equal [r lpop lst{t}] "dd"
+ assert_equal [read_big_bulk {r rpop lst{t}}] $str_length
+ } {} {large-memory}
+
+ # restore defaults
+ r config set proto-max-bulk-len 536870912
+ r config set client-query-buffer-limit 1073741824
+
+} ;# skip 32-bit builds
+}
+} ;# run_solo
+
+start_server {
+ tags {"list"}
+ overrides {
+ "list-max-ziplist-size" -1
+ }
+} {
+ source "tests/unit/type/list-common.tcl"
+
+ # A helper function to execute either B*POP or BLMPOP* with one input key.
+ proc bpop_command {rd pop key timeout} {
+ if {$pop == "BLMPOP_LEFT"} {
+ $rd blmpop $timeout 1 $key left count 1
+ } elseif {$pop == "BLMPOP_RIGHT"} {
+ $rd blmpop $timeout 1 $key right count 1
+ } else {
+ $rd $pop $key $timeout
+ }
+ }
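+
+ # Usage sketch (hypothetical key/timeout values):
+ #   bpop_command $rd BLMPOP_LEFT mykey 0   ;# sends: blmpop 0 1 mykey left count 1
+ #   bpop_command $rd BLPOP mykey 0         ;# sends: blpop mykey 0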
+
+ # A helper function to execute either B*POP or BLMPOP* with two input keys.
+ proc bpop_command_two_key {rd pop key key2 timeout} {
+ if {$pop == "BLMPOP_LEFT"} {
+ $rd blmpop $timeout 2 $key $key2 left count 1
+ } elseif {$pop == "BLMPOP_RIGHT"} {
+ $rd blmpop $timeout 2 $key $key2 right count 1
+ } else {
+ $rd $pop $key $key2 $timeout
+ }
+ }
+
+ proc create_listpack {key entries} {
+ r del $key
+ foreach entry $entries { r rpush $key $entry }
+ assert_encoding listpack $key
+ }
+
+ proc create_quicklist {key entries} {
+ r del $key
+ foreach entry $entries { r rpush $key $entry }
+ assert_encoding quicklist $key
+ }
+
+foreach {type large} [array get largevalue] {
+ test "LPOS basic usage - $type" {
+ r DEL mylist
+ r RPUSH mylist a b c $large 2 3 c c
+ assert {[r LPOS mylist a] == 0}
+ assert {[r LPOS mylist c] == 2}
+ }
+
+ test {LPOS RANK (positive, negative and zero rank) option} {
+ assert {[r LPOS mylist c RANK 1] == 2}
+ assert {[r LPOS mylist c RANK 2] == 6}
+ assert {[r LPOS mylist c RANK 4] eq ""}
+ assert {[r LPOS mylist c RANK -1] == 7}
+ assert {[r LPOS mylist c RANK -2] == 6}
+ assert_error "*RANK can't be zero: use 1 to start from the first match, 2 from the second ... or use negative to start*" {r LPOS mylist c RANK 0}
+ assert_error "*value is out of range*" {r LPOS mylist c RANK -9223372036854775808}
+ }
+
+ test {LPOS COUNT option} {
+ assert {[r LPOS mylist c COUNT 0] == {2 6 7}}
+ assert {[r LPOS mylist c COUNT 1] == {2}}
+ assert {[r LPOS mylist c COUNT 2] == {2 6}}
+ assert {[r LPOS mylist c COUNT 100] == {2 6 7}}
+ }
+
+ test {LPOS COUNT + RANK option} {
+ assert {[r LPOS mylist c COUNT 0 RANK 2] == {6 7}}
+ assert {[r LPOS mylist c COUNT 2 RANK -1] == {7 6}}
+ }
+
+ test {LPOS non existing key} {
+ assert {[r LPOS mylistxxx c COUNT 0 RANK 2] eq {}}
+ }
+
+ test {LPOS no match} {
+ assert {[r LPOS mylist x COUNT 2 RANK -1] eq {}}
+ assert {[r LPOS mylist x RANK -1] eq {}}
+ }
+
+ test {LPOS MAXLEN} {
+ assert {[r LPOS mylist a COUNT 0 MAXLEN 1] == {0}}
+ assert {[r LPOS mylist c COUNT 0 MAXLEN 1] == {}}
+ assert {[r LPOS mylist c COUNT 0 MAXLEN 3] == {2}}
+ assert {[r LPOS mylist c COUNT 0 MAXLEN 3 RANK -1] == {7 6}}
+ assert {[r LPOS mylist c COUNT 0 MAXLEN 7 RANK 2] == {6}}
+ }
+
+ test {LPOS when RANK is greater than matches} {
+ r DEL mylist
+ r LPUSH mylist a
+ assert {[r LPOS mylist b COUNT 10 RANK 5] eq {}}
+ }
+
+ test "LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - $type" {
+ # first lpush then rpush
+ r del mylist1
+ assert_equal 1 [r lpush mylist1 $large]
+ assert_encoding $type mylist1
+ assert_equal 2 [r rpush mylist1 b]
+ assert_equal 3 [r rpush mylist1 c]
+ assert_equal 3 [r llen mylist1]
+ assert_equal $large [r lindex mylist1 0]
+ assert_equal b [r lindex mylist1 1]
+ assert_equal c [r lindex mylist1 2]
+ assert_equal {} [r lindex mylist1 3]
+ assert_equal c [r rpop mylist1]
+ assert_equal $large [r lpop mylist1]
+
+ # first rpush then lpush
+ r del mylist2
+ assert_equal 1 [r rpush mylist2 $large]
+ assert_equal 2 [r lpush mylist2 b]
+ assert_equal 3 [r lpush mylist2 c]
+ assert_encoding $type mylist2
+ assert_equal 3 [r llen mylist2]
+ assert_equal c [r lindex mylist2 0]
+ assert_equal b [r lindex mylist2 1]
+ assert_equal $large [r lindex mylist2 2]
+ assert_equal {} [r lindex mylist2 3]
+ assert_equal $large [r rpop mylist2]
+ assert_equal c [r lpop mylist2]
+ }
+
+ test "LPOP/RPOP with wrong number of arguments" {
+ assert_error {*wrong number of arguments for 'lpop' command} {r lpop key 1 1}
+ assert_error {*wrong number of arguments for 'rpop' command} {r rpop key 2 2}
+ }
+
+ test "RPOP/LPOP with the optional count argument - $type" {
+ assert_equal 7 [r lpush listcount aa $large cc dd ee ff gg]
+ assert_equal {gg} [r lpop listcount 1]
+ assert_equal {ff ee} [r lpop listcount 2]
+ assert_equal "aa $large" [r rpop listcount 2]
+ assert_equal {cc} [r rpop listcount 1]
+ assert_equal {dd} [r rpop listcount 123]
+ assert_error "*ERR*range*" {r lpop forbarqaz -123}
+ }
+}
+
+ proc verify_resp_response {resp response resp2_response resp3_response} {
+ if {$resp == 2} {
+ assert_equal $response $resp2_response
+ } elseif {$resp == 3} {
+ assert_equal $response $resp3_response
+ }
+ }
+
+ foreach resp {3 2} {
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$resp == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$resp == 2} {continue}
+ }
+ r hello $resp
+
+ # Make sure we can distinguish between an empty array and a null response
+ r readraw 1
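+ # (In raw mode replies are literal RESP tokens: "*0" is an empty array,
+ # "$-1" / "*-1" are the RESP2 nulls, and "_" is the RESP3 null; these are
+ # the values verify_resp_response compares against below.)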
+
+ test "LPOP/RPOP with the count 0 returns an empty array in RESP$resp" {
+ r lpush listcount zero
+ assert_equal {*0} [r lpop listcount 0]
+ assert_equal {*0} [r rpop listcount 0]
+ }
+
+ test "LPOP/RPOP against non existing key in RESP$resp" {
+ r del non_existing_key
+
+ verify_resp_response $resp [r lpop non_existing_key] {$-1} {_}
+ verify_resp_response $resp [r rpop non_existing_key] {$-1} {_}
+ }
+
+ test "LPOP/RPOP with <count> against non existing key in RESP$resp" {
+ r del non_existing_key
+
+ verify_resp_response $resp [r lpop non_existing_key 0] {*-1} {_}
+ verify_resp_response $resp [r lpop non_existing_key 1] {*-1} {_}
+
+ verify_resp_response $resp [r rpop non_existing_key 0] {*-1} {_}
+ verify_resp_response $resp [r rpop non_existing_key 1] {*-1} {_}
+ }
+
+ r readraw 0
+ r hello 2
+ }
+
+ test {Variadic RPUSH/LPUSH} {
+ r del mylist
+ assert_equal 4 [r lpush mylist a b c d]
+ assert_equal 8 [r rpush mylist 0 1 2 3]
+ assert_equal {d c b a 0 1 2 3} [r lrange mylist 0 -1]
+ }
+
+ test {DEL a list} {
+ assert_equal 1 [r del mylist2]
+ assert_equal 0 [r exists mylist2]
+ assert_equal 0 [r llen mylist2]
+ }
+
+ foreach {type large} [array get largevalue] {
+ foreach {pop} {BLPOP BLMPOP_LEFT} {
+ test "$pop: single existing list - $type" {
+ set rd [redis_deferring_client]
+ create_$type blist "a b $large c d"
+
+ bpop_command $rd $pop blist 1
+ assert_equal {blist a} [$rd read]
+ if {$pop == "BLPOP"} {
+ bpop_command $rd BRPOP blist 1
+ } else {
+ bpop_command $rd BLMPOP_RIGHT blist 1
+ }
+ assert_equal {blist d} [$rd read]
+
+ bpop_command $rd $pop blist 1
+ assert_equal {blist b} [$rd read]
+ if {$pop == "BLPOP"} {
+ bpop_command $rd BRPOP blist 1
+ } else {
+ bpop_command $rd BLMPOP_RIGHT blist 1
+ }
+ assert_equal {blist c} [$rd read]
+
+ assert_equal 1 [r llen blist]
+ $rd close
+ }
+
+ test "$pop: multiple existing lists - $type" {
+ set rd [redis_deferring_client]
+ create_$type blist1{t} "a $large c"
+ create_$type blist2{t} "d $large f"
+
+ bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
+ assert_equal {blist1{t} a} [$rd read]
+ if {$pop == "BLPOP"} {
+ bpop_command_two_key $rd BRPOP blist1{t} blist2{t} 1
+ } else {
+ bpop_command_two_key $rd BLMPOP_RIGHT blist1{t} blist2{t} 1
+ }
+ assert_equal {blist1{t} c} [$rd read]
+ assert_equal 1 [r llen blist1{t}]
+ assert_equal 3 [r llen blist2{t}]
+
+ bpop_command_two_key $rd $pop blist2{t} blist1{t} 1
+ assert_equal {blist2{t} d} [$rd read]
+ if {$pop == "BLPOP"} {
+ bpop_command_two_key $rd BRPOP blist2{t} blist1{t} 1
+ } else {
+ bpop_command_two_key $rd BLMPOP_RIGHT blist2{t} blist1{t} 1
+ }
+ assert_equal {blist2{t} f} [$rd read]
+ assert_equal 1 [r llen blist1{t}]
+ assert_equal 1 [r llen blist2{t}]
+ $rd close
+ }
+
+ test "$pop: second list has an entry - $type" {
+ set rd [redis_deferring_client]
+ r del blist1{t}
+ create_$type blist2{t} "d $large f"
+
+ bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
+ assert_equal {blist2{t} d} [$rd read]
+ if {$pop == "BLPOP"} {
+ bpop_command_two_key $rd BRPOP blist1{t} blist2{t} 1
+ } else {
+ bpop_command_two_key $rd BLMPOP_RIGHT blist1{t} blist2{t} 1
+ }
+ assert_equal {blist2{t} f} [$rd read]
+ assert_equal 0 [r llen blist1{t}]
+ assert_equal 1 [r llen blist2{t}]
+ $rd close
+ }
+ }
+
+ test "BRPOPLPUSH - $type" {
+ r del target{t}
+ r rpush target{t} bar
+
+ set rd [redis_deferring_client]
+ create_$type blist{t} "a b $large c d"
+
+ $rd brpoplpush blist{t} target{t} 1
+ assert_equal d [$rd read]
+
+ assert_equal d [r lpop target{t}]
+ assert_equal "a b $large c" [r lrange blist{t} 0 -1]
+ $rd close
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "BLMOVE $wherefrom $whereto - $type" {
+ r del target{t}
+ r rpush target{t} bar
+
+ set rd [redis_deferring_client]
+ create_$type blist{t} "a b $large c d"
+
+ $rd blmove blist{t} target{t} $wherefrom $whereto 1
+ set poppedelement [$rd read]
+
+ if {$wherefrom eq "right"} {
+ assert_equal d $poppedelement
+ assert_equal "a b $large c" [r lrange blist{t} 0 -1]
+ } else {
+ assert_equal a $poppedelement
+ assert_equal "b $large c d" [r lrange blist{t} 0 -1]
+ }
+
+ if {$whereto eq "right"} {
+ assert_equal $poppedelement [r rpop target{t}]
+ } else {
+ assert_equal $poppedelement [r lpop target{t}]
+ }
+ $rd close
+ }
+ }
+ }
+ }
+
+foreach {pop} {BLPOP BLMPOP_LEFT} {
+ test "$pop, LPUSH + DEL should not awake blocked client" {
+ set rd [redis_deferring_client]
+ r del list
+
+ bpop_command $rd $pop list 0
+ wait_for_blocked_client
+
+ r multi
+ r lpush list a
+ r del list
+ r exec
+ r del list
+ r lpush list b
+ assert_equal {list b} [$rd read]
+ $rd close
+ }
+
+ test "$pop, LPUSH + DEL + SET should not awake blocked client" {
+ set rd [redis_deferring_client]
+ r del list
+
+ bpop_command $rd $pop list 0
+ wait_for_blocked_client
+
+ r multi
+ r lpush list a
+ r del list
+ r set list foo
+ r exec
+ r del list
+ r lpush list b
+ assert_equal {list b} [$rd read]
+ $rd close
+ }
+}
+
+ test "BLPOP with same key multiple times should work (issue #801)" {
+ set rd [redis_deferring_client]
+ r del list1{t} list2{t}
+
+ # Data arriving after the BLPOP.
+ $rd blpop list1{t} list2{t} list2{t} list1{t} 0
+ wait_for_blocked_client
+ r lpush list1{t} a
+ assert_equal [$rd read] {list1{t} a}
+ $rd blpop list1{t} list2{t} list2{t} list1{t} 0
+ wait_for_blocked_client
+ r lpush list2{t} b
+ assert_equal [$rd read] {list2{t} b}
+
+ # Data already there.
+ r lpush list1{t} a
+ r lpush list2{t} b
+ $rd blpop list1{t} list2{t} list2{t} list1{t} 0
+ assert_equal [$rd read] {list1{t} a}
+ $rd blpop list1{t} list2{t} list2{t} list1{t} 0
+ assert_equal [$rd read] {list2{t} b}
+ $rd close
+ }
+
+foreach {pop} {BLPOP BLMPOP_LEFT} {
+ test "MULTI/EXEC is isolated from the point of view of $pop" {
+ set rd [redis_deferring_client]
+ r del list
+
+ bpop_command $rd $pop list 0
+ wait_for_blocked_client
+
+ r multi
+ r lpush list a
+ r lpush list b
+ r lpush list c
+ r exec
+ assert_equal {list c} [$rd read]
+ $rd close
+ }
+
+ test "$pop with variadic LPUSH" {
+ set rd [redis_deferring_client]
+ r del blist
+ bpop_command $rd $pop blist 0
+ wait_for_blocked_client
+ assert_equal 2 [r lpush blist foo bar]
+ assert_equal {blist bar} [$rd read]
+ assert_equal foo [lindex [r lrange blist 0 -1] 0]
+ $rd close
+ }
+}
+
+ test "BRPOPLPUSH with zero timeout should block indefinitely" {
+ set rd [redis_deferring_client]
+ r del blist{t} target{t}
+ r rpush target{t} bar
+ $rd brpoplpush blist{t} target{t} 0
+ wait_for_blocked_clients_count 1
+ r rpush blist{t} foo
+ assert_equal foo [$rd read]
+ assert_equal {foo bar} [r lrange target{t} 0 -1]
+ $rd close
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "BLMOVE $wherefrom $whereto with zero timeout should block indefinitely" {
+ set rd [redis_deferring_client]
+ r del blist{t} target{t}
+ r rpush target{t} bar
+ $rd blmove blist{t} target{t} $wherefrom $whereto 0
+ wait_for_blocked_clients_count 1
+ r rpush blist{t} foo
+ assert_equal foo [$rd read]
+ if {$whereto eq "right"} {
+ assert_equal {bar foo} [r lrange target{t} 0 -1]
+ } else {
+ assert_equal {foo bar} [r lrange target{t} 0 -1]
+ }
+ $rd close
+ }
+ }
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "BLMOVE ($wherefrom, $whereto) with a client BLPOPing the target list" {
+ set rd [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ r del blist{t} target{t}
+ $rd2 blpop target{t} 0
+ wait_for_blocked_clients_count 1
+ $rd blmove blist{t} target{t} $wherefrom $whereto 0
+ wait_for_blocked_clients_count 2
+ r rpush blist{t} foo
+ assert_equal foo [$rd read]
+ assert_equal {target{t} foo} [$rd2 read]
+ assert_equal 0 [r exists target{t}]
+ $rd close
+ $rd2 close
+ }
+ }
+ }
+
+ test "BRPOPLPUSH with wrong source type" {
+ set rd [redis_deferring_client]
+ r del blist{t} target{t}
+ r set blist{t} nolist
+ $rd brpoplpush blist{t} target{t} 1
+ assert_error "WRONGTYPE*" {$rd read}
+ $rd close
+ }
+
+ test "BRPOPLPUSH with wrong destination type" {
+ set rd [redis_deferring_client]
+ r del blist{t} target{t}
+ r set target{t} nolist
+ r lpush blist{t} foo
+ $rd brpoplpush blist{t} target{t} 1
+ assert_error "WRONGTYPE*" {$rd read}
+ $rd close
+
+ set rd [redis_deferring_client]
+ r del blist{t} target{t}
+ r set target{t} nolist
+ $rd brpoplpush blist{t} target{t} 0
+ wait_for_blocked_clients_count 1
+ r rpush blist{t} foo
+ assert_error "WRONGTYPE*" {$rd read}
+ assert_equal {foo} [r lrange blist{t} 0 -1]
+ $rd close
+ }
+
+ test "BRPOPLPUSH maintains order of elements after failure" {
+ set rd [redis_deferring_client]
+ r del blist{t} target{t}
+ r set target{t} nolist
+ $rd brpoplpush blist{t} target{t} 0
+ wait_for_blocked_client
+ r rpush blist{t} a b c
+ assert_error "WRONGTYPE*" {$rd read}
+ $rd close
+ r lrange blist{t} 0 -1
+ } {a b c}
+
+ test "BRPOPLPUSH with multiple blocked clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ r del blist{t} target1{t} target2{t}
+ r set target1{t} nolist
+ $rd1 brpoplpush blist{t} target1{t} 0
+ wait_for_blocked_clients_count 1
+ $rd2 brpoplpush blist{t} target2{t} 0
+ wait_for_blocked_clients_count 2
+ r lpush blist{t} foo
+
+ assert_error "WRONGTYPE*" {$rd1 read}
+ assert_equal {foo} [$rd2 read]
+ assert_equal {foo} [r lrange target2{t} 0 -1]
+ $rd1 close
+ $rd2 close
+ }
+
+ test "BLMPOP with multiple blocked clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set rd3 [redis_deferring_client]
+ set rd4 [redis_deferring_client]
+ r del blist{t} blist2{t}
+
+ $rd1 blmpop 0 2 blist{t} blist2{t} left count 1
+ wait_for_blocked_clients_count 1
+ $rd2 blmpop 0 2 blist{t} blist2{t} right count 10
+ wait_for_blocked_clients_count 2
+ $rd3 blmpop 0 2 blist{t} blist2{t} left count 10
+ wait_for_blocked_clients_count 3
+ $rd4 blmpop 0 2 blist{t} blist2{t} right count 1
+ wait_for_blocked_clients_count 4
+
+ r multi
+ r lpush blist{t} a b c d e
+ r lpush blist2{t} 1 2 3 4 5
+ r exec
+
+ assert_equal {blist{t} e} [$rd1 read]
+ assert_equal {blist{t} {a b c d}} [$rd2 read]
+ assert_equal {blist2{t} {5 4 3 2 1}} [$rd3 read]
+
+ r lpush blist2{t} 1 2 3
+ assert_equal {blist2{t} 1} [$rd4 read]
+ $rd1 close
+ $rd2 close
+ $rd3 close
+ $rd4 close
+ }
+
+ test "Linked LMOVEs" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ r del list1{t} list2{t} list3{t}
+
+ $rd1 blmove list1{t} list2{t} right left 0
+ wait_for_blocked_clients_count 1
+ $rd2 blmove list2{t} list3{t} left right 0
+ wait_for_blocked_clients_count 2
+
+ r rpush list1{t} foo
+
+ assert_equal {} [r lrange list1{t} 0 -1]
+ assert_equal {} [r lrange list2{t} 0 -1]
+ assert_equal {foo} [r lrange list3{t} 0 -1]
+ $rd1 close
+ $rd2 close
+ }
+
+ test "Circular BRPOPLPUSH" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ r del list1{t} list2{t}
+
+ $rd1 brpoplpush list1{t} list2{t} 0
+ wait_for_blocked_clients_count 1
+ $rd2 brpoplpush list2{t} list1{t} 0
+ wait_for_blocked_clients_count 2
+
+ r rpush list1{t} foo
+
+ assert_equal {foo} [r lrange list1{t} 0 -1]
+ assert_equal {} [r lrange list2{t} 0 -1]
+ $rd1 close
+ $rd2 close
+ }
+
+ test "Self-referential BRPOPLPUSH" {
+ set rd [redis_deferring_client]
+
+ r del blist{t}
+
+ $rd brpoplpush blist{t} blist{t} 0
+ wait_for_blocked_client
+
+ r rpush blist{t} foo
+
+ assert_equal {foo} [r lrange blist{t} 0 -1]
+ $rd close
+ }
+
+ test "BRPOPLPUSH inside a transaction" {
+ r del xlist{t} target{t}
+ r lpush xlist{t} foo
+ r lpush xlist{t} bar
+
+ r multi
+ r brpoplpush xlist{t} target{t} 0
+ r brpoplpush xlist{t} target{t} 0
+ r brpoplpush xlist{t} target{t} 0
+ r lrange xlist{t} 0 -1
+ r lrange target{t} 0 -1
+ r exec
+ } {foo bar {} {} {bar foo}}
+
+ test "PUSH resulting from BRPOPLPUSH affect WATCH" {
+ set blocked_client [redis_deferring_client]
+ set watching_client [redis_deferring_client]
+ r del srclist{t} dstlist{t} somekey{t}
+ r set somekey{t} somevalue
+ $blocked_client brpoplpush srclist{t} dstlist{t} 0
+ wait_for_blocked_client
+ $watching_client watch dstlist{t}
+ $watching_client read
+ $watching_client multi
+ $watching_client read
+ $watching_client get somekey{t}
+ $watching_client read
+ r lpush srclist{t} element
+ $watching_client exec
+ set res [$watching_client read]
+ $blocked_client close
+ $watching_client close
+ set _ $res
+ } {}
+
+ test "BRPOPLPUSH does not affect WATCH while still blocked" {
+ set blocked_client [redis_deferring_client]
+ set watching_client [redis_deferring_client]
+ r del srclist{t} dstlist{t} somekey{t}
+ r set somekey{t} somevalue
+ $blocked_client brpoplpush srclist{t} dstlist{t} 0
+ wait_for_blocked_client
+ $watching_client watch dstlist{t}
+ $watching_client read
+ $watching_client multi
+ $watching_client read
+ $watching_client get somekey{t}
+ $watching_client read
+ $watching_client exec
+ # The blocked BRPOPLPUSH may create problems; unblock it.
+ r lpush srclist{t} element
+ set res [$watching_client read]
+ $blocked_client close
+ $watching_client close
+ set _ $res
+ } {somevalue}
+
+ test {BRPOPLPUSH timeout} {
+ set rd [redis_deferring_client]
+
+ $rd brpoplpush foo_list{t} bar_list{t} 1
+ wait_for_blocked_clients_count 1
+ wait_for_blocked_clients_count 0 500 10
+ set res [$rd read]
+ $rd close
+ set _ $res
+ } {}
+
+ test {SWAPDB awakes blocked client} {
+ r flushall
+ r select 1
+ r rpush k hello
+ r select 9
+ set rd [redis_deferring_client]
+ $rd brpop k 5
+ wait_for_blocked_clients_count 1
+ r swapdb 1 9
+ $rd read
+ } {k hello} {singledb:skip}
+
+ test {SWAPDB wants to wake blocked client, but the key already expired} {
+ set repl [attach_to_replication_stream]
+ r flushall
+ r debug set-active-expire 0
+ r select 1
+ r rpush k hello
+ r pexpire k 100
+ set rd [redis_deferring_client]
+ $rd deferred 0
+ $rd select 9
+ set id [$rd client id]
+ $rd deferred 1
+ $rd brpop k 1
+ wait_for_blocked_clients_count 1
+ after 101
+ r swapdb 1 9
+ # The SWAPDB command tries to awake the blocked client, but it remains
+ # blocked because the key is expired. Check that the deferred client is
+ # still blocked. Then unblock it.
+ assert_match "*flags=b*" [r client list id $id]
+ r client unblock $id
+ assert_equal {} [$rd read]
+ $rd deferred 0
+ # We want to force the key deletion to be propagated to the replica
+ # in order to verify it was expired on the replication stream.
+ $rd set somekey1 someval1
+ $rd exists k
+ r set somekey2 someval2
+
+ assert_replication_stream $repl {
+ {select *}
+ {flushall}
+ {select 1}
+ {rpush k hello}
+ {pexpireat k *}
+ {swapdb 1 9}
+ {select 9}
+ {set somekey1 someval1}
+ {del k}
+ {select 1}
+ {set somekey2 someval2}
+ }
+ close_replication_stream $repl
+ r debug set-active-expire 1
+ # Restore server and client state
+ r select 9
+ } {OK} {singledb:skip needs:debug}
+
+ test {MULTI + LPUSH + EXPIRE + DEBUG SLEEP on blocked client, key already expired} {
+ set repl [attach_to_replication_stream]
+ r flushall
+ r debug set-active-expire 0
+
+ set rd [redis_deferring_client]
+ $rd client id
+ set id [$rd read]
+ $rd brpop k 0
+ wait_for_blocked_clients_count 1
+
+ r multi
+ r rpush k hello
+ r pexpire k 100
+ r debug sleep 0.2
+ r exec
+
+ # The EXEC command tries to awake the blocked client, but it remains
+ # blocked because the key is expired. Check that the deferred client is
+ # still blocked. Then unblock it.
+ assert_match "*flags=b*" [r client list id $id]
+ r client unblock $id
+ assert_equal {} [$rd read]
+ # We want to force the key deletion to be propagated to the replica
+ # in order to verify it was expired on the replication stream.
+ $rd exists k
+ assert_equal {0} [$rd read]
+ assert_replication_stream $repl {
+ {select *}
+ {flushall}
+ {multi}
+ {rpush k hello}
+ {pexpireat k *}
+ {exec}
+ {del k}
+ }
+ close_replication_stream $repl
+ # Restore server and client state
+ r debug set-active-expire 1
+ r select 9
+ } {OK} {singledb:skip needs:debug}
+
+foreach {pop} {BLPOP BLMPOP_LEFT} {
+ test "$pop when new key is moved into place" {
+ set rd [redis_deferring_client]
+ r del foo{t}
+
+ bpop_command $rd $pop foo{t} 0
+ wait_for_blocked_client
+ r lpush bob{t} abc def hij
+ r rename bob{t} foo{t}
+ set res [$rd read]
+ $rd close
+ set _ $res
+ } {foo{t} hij}
+
+ test "$pop when result key is created by SORT..STORE" {
+ set rd [redis_deferring_client]
+
+ # Drain the list from the previous test without an explicit delete.
+ r lpop foo{t}
+ r lpop foo{t}
+ r lpop foo{t}
+
+ bpop_command $rd $pop foo{t} 5
+ wait_for_blocked_client
+ r lpush notfoo{t} hello hola aguacate konichiwa zanzibar
+ r sort notfoo{t} ALPHA store foo{t}
+ set res [$rd read]
+ $rd close
+ set _ $res
+ } {foo{t} aguacate}
+}
+
+ test "BLPOP: timeout value out of range" {
+ # The timeout is parsed as a float, multiplied by 1000, added to
+ # mstime() and stored in a long long, which might lead to an
+ # out-of-range value. (Even though the given timeout is smaller than
+ # LLONG_MAX, the result will be bigger.)
+ assert_error "ERR *is out of range*" {r BLPOP blist1 0x7FFFFFFFFFFFFF}
+ }
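+
+ # A rough sketch of the arithmetic (illustrative values, not server code):
+ #   0x7FFFFFFFFFFFFF seconds = 36028797018963967
+ #   36028797018963967 * 1000 = 36028797018963967000 milliseconds
+ #   LLONG_MAX                =  9223372036854775807
+ # so the product alone already overflows before mstime() is even added.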
+
+ foreach {pop} {BLPOP BRPOP BLMPOP_LEFT BLMPOP_RIGHT} {
+ test "$pop: with single empty list argument" {
+ set rd [redis_deferring_client]
+ r del blist1
+ bpop_command $rd $pop blist1 1
+ wait_for_blocked_client
+ r rpush blist1 foo
+ assert_equal {blist1 foo} [$rd read]
+ assert_equal 0 [r exists blist1]
+ $rd close
+ }
+
+ test "$pop: with negative timeout" {
+ set rd [redis_deferring_client]
+ bpop_command $rd $pop blist1 -1
+ assert_error "ERR *is negative*" {$rd read}
+ $rd close
+ }
+
+ test "$pop: with non-integer timeout" {
+ set rd [redis_deferring_client]
+ r del blist1
+ bpop_command $rd $pop blist1 0.1
+ r rpush blist1 foo
+ assert_equal {blist1 foo} [$rd read]
+ assert_equal 0 [r exists blist1]
+ $rd close
+ }
+
+ test "$pop: with zero timeout should block indefinitely" {
+ # To test this, use a timeout of 0 and wait until the client blocks.
+ # The blocking pop should still be waiting for a push.
+ set rd [redis_deferring_client]
+ bpop_command $rd $pop blist1 0
+ wait_for_blocked_client
+ r rpush blist1 foo
+ assert_equal {blist1 foo} [$rd read]
+ $rd close
+ }
+
+ test "$pop: with 0.001 timeout should not block indefinitely" {
+ # Use a timeout of 0.001 and wait for the number of blocked clients to equal 0.
+ # Validate the empty read from the deferring client.
+ set rd [redis_deferring_client]
+ bpop_command $rd $pop blist1 0.001
+ wait_for_blocked_clients_count 0
+ assert_equal {} [$rd read]
+ $rd close
+ }
+
+ test "$pop: second argument is not a list" {
+ set rd [redis_deferring_client]
+ r del blist1{t} blist2{t}
+ r set blist2{t} nolist{t}
+ bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
+ assert_error "WRONGTYPE*" {$rd read}
+ $rd close
+ }
+
+ test "$pop: timeout" {
+ set rd [redis_deferring_client]
+ r del blist1{t} blist2{t}
+ bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
+ wait_for_blocked_client
+ assert_equal {} [$rd read]
+ $rd close
+ }
+
+ test "$pop: arguments are empty" {
+ set rd [redis_deferring_client]
+ r del blist1{t} blist2{t}
+
+ bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
+ wait_for_blocked_client
+ r rpush blist1{t} foo
+ assert_equal {blist1{t} foo} [$rd read]
+ assert_equal 0 [r exists blist1{t}]
+ assert_equal 0 [r exists blist2{t}]
+
+ bpop_command_two_key $rd $pop blist1{t} blist2{t} 1
+ wait_for_blocked_client
+ r rpush blist2{t} foo
+ assert_equal {blist2{t} foo} [$rd read]
+ assert_equal 0 [r exists blist1{t}]
+ assert_equal 0 [r exists blist2{t}]
+ $rd close
+ }
+ }
+
+foreach {pop} {BLPOP BLMPOP_LEFT} {
+ test "$pop inside a transaction" {
+ r del xlist
+ r lpush xlist foo
+ r lpush xlist bar
+ r multi
+
+ bpop_command r $pop xlist 0
+ bpop_command r $pop xlist 0
+ bpop_command r $pop xlist 0
+ r exec
+ } {{xlist bar} {xlist foo} {}}
+}
+
+ test {BLMPOP propagate as pop with count command to replica} {
+ set rd [redis_deferring_client]
+ set repl [attach_to_replication_stream]
+
+ # BLMPOP without being blocked.
+ r lpush mylist{t} a b c
+ r rpush mylist2{t} 1 2 3
+ r blmpop 0 1 mylist{t} left count 1
+ r blmpop 0 2 mylist{t} mylist2{t} right count 10
+ r blmpop 0 2 mylist{t} mylist2{t} right count 10
+
+ # BLMPOP that gets blocked.
+ $rd blmpop 0 1 mylist{t} left count 1
+ wait_for_blocked_client
+ r lpush mylist{t} a
+ $rd blmpop 0 2 mylist{t} mylist2{t} left count 5
+ wait_for_blocked_client
+ r lpush mylist{t} a b c
+ $rd blmpop 0 2 mylist{t} mylist2{t} right count 10
+ wait_for_blocked_client
+ r rpush mylist2{t} a b c
+
+ # Released on timeout.
+ assert_equal {} [r blmpop 0.01 1 mylist{t} left count 10]
+ r set foo{t} bar ;# something else to propagate after, so we can make sure the above pop didn't propagate.
+
+ $rd close
+
+ assert_replication_stream $repl {
+ {select *}
+ {lpush mylist{t} a b c}
+ {rpush mylist2{t} 1 2 3}
+ {lpop mylist{t} 1}
+ {rpop mylist{t} 2}
+ {rpop mylist2{t} 3}
+ {lpush mylist{t} a}
+ {lpop mylist{t} 1}
+ {lpush mylist{t} a b c}
+ {lpop mylist{t} 3}
+ {rpush mylist2{t} a b c}
+ {rpop mylist2{t} 3}
+ {set foo{t} bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {LPUSHX, RPUSHX - generic} {
+ r del xlist
+ assert_equal 0 [r lpushx xlist a]
+ assert_equal 0 [r llen xlist]
+ assert_equal 0 [r rpushx xlist a]
+ assert_equal 0 [r llen xlist]
+ }
+
+ foreach {type large} [array get largevalue] {
+ test "LPUSHX, RPUSHX - $type" {
+ create_$type xlist "$large c"
+ assert_equal 3 [r rpushx xlist d]
+ assert_equal 4 [r lpushx xlist a]
+ assert_equal 6 [r rpushx xlist 42 x]
+ assert_equal 9 [r lpushx xlist y3 y2 y1]
+ assert_equal "y1 y2 y3 a $large c d 42 x" [r lrange xlist 0 -1]
+ }
+
+ test "LINSERT - $type" {
+ create_$type xlist "a $large c d"
+ assert_equal 5 [r linsert xlist before c zz] "before c"
+ assert_equal "a $large zz c d" [r lrange xlist 0 10] "lrangeA"
+ assert_equal 6 [r linsert xlist after c yy] "after c"
+ assert_equal "a $large zz c yy d" [r lrange xlist 0 10] "lrangeB"
+ assert_equal 7 [r linsert xlist after d dd] "after d"
+ assert_equal -1 [r linsert xlist after bad ddd] "after bad"
+ assert_equal "a $large zz c yy d dd" [r lrange xlist 0 10] "lrangeC"
+ assert_equal 8 [r linsert xlist before a aa] "before a"
+ assert_equal -1 [r linsert xlist before bad aaa] "before bad"
+ assert_equal "aa a $large zz c yy d dd" [r lrange xlist 0 10] "lrangeD"
+
+ # Check inserting an integer-encoded value.
+ assert_equal 9 [r linsert xlist before aa 42] "before aa"
+ assert_equal 42 [r lrange xlist 0 0] "lrangeE"
+ }
+ }
+
+ test {LINSERT raise error on bad syntax} {
+ catch {[r linsert xlist aft3r aa 42]} e
+ set e
+ } {*ERR*syntax*error*}
+
+ test {LINSERT against non-list value error} {
+ r set k1 v1
+ assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r linsert k1 after 0 0}
+ }
+
+ test {LINSERT against non existing key} {
+ assert_equal 0 [r linsert not-a-key before 0 0]
+ }
+
+foreach type {listpack quicklist} {
+ foreach {num} {250 500} {
+ if {$type == "quicklist"} {
+ set origin_config [config_get_set list-max-listpack-size 5]
+ } else {
+ set origin_config [config_get_set list-max-listpack-size -1]
+ }
+
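+ # The list is built below as 0..len-1 in order, so the element at index i
+ # must equal i, and the element at negative index -(i+1) must equal len-1-i.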
+ proc check_numbered_list_consistency {key} {
+ set len [r llen $key]
+ for {set i 0} {$i < $len} {incr i} {
+ assert_equal $i [r lindex $key $i]
+ assert_equal [expr $len-1-$i] [r lindex $key [expr (-$i)-1]]
+ }
+ }
+
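+ # Same consistency check, but probing random indexes instead of a
+ # sequential scan.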
+ proc check_random_access_consistency {key} {
+ set len [r llen $key]
+ for {set i 0} {$i < $len} {incr i} {
+ set rint [expr int(rand()*$len)]
+ assert_equal $rint [r lindex $key $rint]
+ assert_equal [expr $len-1-$rint] [r lindex $key [expr (-$rint)-1]]
+ }
+ }
+
+ test "LINDEX consistency test - $type" {
+ r del mylist
+ for {set i 0} {$i < $num} {incr i} {
+ r rpush mylist $i
+ }
+ assert_encoding $type mylist
+ check_numbered_list_consistency mylist
+ }
+
+ test "LINDEX random access - $type" {
+ assert_encoding $type mylist
+ check_random_access_consistency mylist
+ }
+
+ test "Check if list is still ok after a DEBUG RELOAD - $type" {
+ r debug reload
+ assert_encoding $type mylist
+ check_numbered_list_consistency mylist
+ check_random_access_consistency mylist
+ } {} {needs:debug}
+
+ config_set list-max-listpack-size $origin_config
+ }
+}
+
+ test {LLEN against non-list value error} {
+ r del mylist
+ r set mylist foobar
+ assert_error WRONGTYPE* {r llen mylist}
+ }
+
+ test {LLEN against non existing key} {
+ assert_equal 0 [r llen not-a-key]
+ }
+
+ test {LINDEX against non-list value error} {
+ assert_error WRONGTYPE* {r lindex mylist 0}
+ }
+
+ test {LINDEX against non existing key} {
+ assert_equal "" [r lindex not-a-key 10]
+ }
+
+ test {LPUSH against non-list value error} {
+ assert_error WRONGTYPE* {r lpush mylist 0}
+ }
+
+ test {RPUSH against non-list value error} {
+ assert_error WRONGTYPE* {r rpush mylist 0}
+ }
+
+ foreach {type large} [array get largevalue] {
+ test "RPOPLPUSH base case - $type" {
+ r del mylist1{t} mylist2{t}
+ create_$type mylist1{t} "a $large c d"
+ assert_equal d [r rpoplpush mylist1{t} mylist2{t}]
+ assert_equal c [r rpoplpush mylist1{t} mylist2{t}]
+ assert_equal $large [r rpoplpush mylist1{t} mylist2{t}]
+ assert_equal "a" [r lrange mylist1{t} 0 -1]
+ assert_equal "$large c d" [r lrange mylist2{t} 0 -1]
+ assert_encoding listpack mylist1{t} ;# converted to listpack after shrinking
+ assert_encoding $type mylist2{t}
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "LMOVE $wherefrom $whereto base case - $type" {
+ r del mylist1{t} mylist2{t}
+
+ if {$wherefrom eq "right"} {
+ create_$type mylist1{t} "c d $large a"
+ } else {
+ create_$type mylist1{t} "a $large c d"
+ }
+ assert_equal a [r lmove mylist1{t} mylist2{t} $wherefrom $whereto]
+ assert_equal $large [r lmove mylist1{t} mylist2{t} $wherefrom $whereto]
+ assert_equal "c d" [r lrange mylist1{t} 0 -1]
+ if {$whereto eq "right"} {
+ assert_equal "a $large" [r lrange mylist2{t} 0 -1]
+ } else {
+ assert_equal "$large a" [r lrange mylist2{t} 0 -1]
+ }
+ assert_encoding $type mylist2{t}
+ }
+ }
+ }
+
+ test "RPOPLPUSH with the same list as src and dst - $type" {
+ create_$type mylist{t} "a $large c"
+ assert_equal "a $large c" [r lrange mylist{t} 0 -1]
+ assert_equal c [r rpoplpush mylist{t} mylist{t}]
+ assert_equal "c a $large" [r lrange mylist{t} 0 -1]
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "LMOVE $wherefrom $whereto with the same list as src and dst - $type" {
+ if {$wherefrom eq "right"} {
+ create_$type mylist{t} "a $large c"
+ assert_equal "a $large c" [r lrange mylist{t} 0 -1]
+ } else {
+ create_$type mylist{t} "c a $large"
+ assert_equal "c a $large" [r lrange mylist{t} 0 -1]
+ }
+ assert_equal c [r lmove mylist{t} mylist{t} $wherefrom $whereto]
+ if {$whereto eq "right"} {
+ assert_equal "a $large c" [r lrange mylist{t} 0 -1]
+ } else {
+ assert_equal "c a $large" [r lrange mylist{t} 0 -1]
+ }
+ }
+ }
+ }
+
+ foreach {othertype otherlarge} [array get largevalue] {
+ test "RPOPLPUSH with $type source and existing target $othertype" {
+ create_$type srclist{t} "a b c $large"
+ create_$othertype dstlist{t} "$otherlarge"
+ assert_equal $large [r rpoplpush srclist{t} dstlist{t}]
+ assert_equal c [r rpoplpush srclist{t} dstlist{t}]
+ assert_equal "a b" [r lrange srclist{t} 0 -1]
+ assert_equal "c $large $otherlarge" [r lrange dstlist{t} 0 -1]
+
+ # When we rpoplpush'ed a large value, dstlist should be
+ # converted to the same encoding as srclist.
+ if {$type eq "quicklist"} {
+ assert_encoding quicklist dstlist{t}
+ }
+ }
+
+ foreach wherefrom {left right} {
+ foreach whereto {left right} {
+ test "LMOVE $wherefrom $whereto with $type source and existing target $othertype" {
+ create_$othertype dstlist{t} "$otherlarge"
+
+ if {$wherefrom eq "right"} {
+ create_$type srclist{t} "a b c $large"
+ } else {
+ create_$type srclist{t} "$large c a b"
+ }
+ assert_equal $large [r lmove srclist{t} dstlist{t} $wherefrom $whereto]
+ assert_equal c [r lmove srclist{t} dstlist{t} $wherefrom $whereto]
+ assert_equal "a b" [r lrange srclist{t} 0 -1]
+
+ if {$whereto eq "right"} {
+ assert_equal "$otherlarge $large c" [r lrange dstlist{t} 0 -1]
+ } else {
+ assert_equal "c $large $otherlarge" [r lrange dstlist{t} 0 -1]
+ }
+
+ # When we lmoved a large value, dstlist should be
+ # converted to the same encoding as srclist.
+ if {$type eq "quicklist"} {
+ assert_encoding quicklist dstlist{t}
+ }
+ }
+ }
+ }
+ }
+ }
+
+ test {RPOPLPUSH against non existing key} {
+ r del srclist{t} dstlist{t}
+ assert_equal {} [r rpoplpush srclist{t} dstlist{t}]
+ assert_equal 0 [r exists srclist{t}]
+ assert_equal 0 [r exists dstlist{t}]
+ }
+
+ test {RPOPLPUSH against non list src key} {
+ r del srclist{t} dstlist{t}
+ r set srclist{t} x
+ assert_error WRONGTYPE* {r rpoplpush srclist{t} dstlist{t}}
+ assert_type string srclist{t}
+ assert_equal 0 [r exists newlist{t}]
+ }
+
+foreach {type large} [array get largevalue] {
+ test "RPOPLPUSH against non list dst key - $type" {
+ create_$type srclist{t} "a $large c d"
+ r set dstlist{t} x
+ assert_error WRONGTYPE* {r rpoplpush srclist{t} dstlist{t}}
+ assert_type string dstlist{t}
+ assert_equal "a $large c d" [r lrange srclist{t} 0 -1]
+ }
+}
+
+ test {RPOPLPUSH against non existing src key} {
+ r del srclist{t} dstlist{t}
+ assert_equal {} [r rpoplpush srclist{t} dstlist{t}]
+ } {}
+
+ foreach {type large} [array get largevalue] {
+ test "Basic LPOP/RPOP/LMPOP - $type" {
+ create_$type mylist "$large 1 2"
+ assert_equal $large [r lpop mylist]
+ assert_equal 2 [r rpop mylist]
+ assert_equal 1 [r lpop mylist]
+ assert_equal 0 [r llen mylist]
+
+ create_$type mylist "$large 1 2"
+ assert_equal "mylist $large" [r lmpop 1 mylist left count 1]
+ assert_equal {mylist {2 1}} [r lmpop 2 mylist mylist right count 2]
+ }
+ }
+
+ test {LPOP/RPOP/LMPOP against empty list} {
+ r del non-existing-list{t} non-existing-list2{t}
+
+ assert_equal {} [r lpop non-existing-list{t}]
+ assert_equal {} [r rpop non-existing-list2{t}]
+
+ assert_equal {} [r lmpop 1 non-existing-list{t} left count 1]
+ assert_equal {} [r lmpop 1 non-existing-list{t} left count 10]
+ assert_equal {} [r lmpop 2 non-existing-list{t} non-existing-list2{t} right count 1]
+ assert_equal {} [r lmpop 2 non-existing-list{t} non-existing-list2{t} right count 10]
+ }
+
+ test {LPOP/RPOP/LMPOP, non-blocking and blocking, against a non-list value} {
+ r set notalist{t} foo
+ assert_error WRONGTYPE* {r lpop notalist{t}}
+ assert_error WRONGTYPE* {r blpop notalist{t} 0}
+ assert_error WRONGTYPE* {r rpop notalist{t}}
+ assert_error WRONGTYPE* {r brpop notalist{t} 0}
+
+ r del notalist2{t}
+ assert_error "WRONGTYPE*" {r lmpop 2 notalist{t} notalist2{t} left count 1}
+ assert_error "WRONGTYPE*" {r blmpop 0 2 notalist{t} notalist2{t} left count 1}
+
+ r del notalist{t}
+ r set notalist2{t} nolist
+ assert_error "WRONGTYPE*" {r lmpop 2 notalist{t} notalist2{t} right count 10}
+ assert_error "WRONGTYPE*" {r blmpop 0 2 notalist{t} notalist2{t} left count 1}
+ }
+
+ foreach {num} {250 500} {
+ test "Mass RPOP/LPOP - $type" {
+ r del mylist
+ set sum1 0
+ for {set i 0} {$i < $num} {incr i} {
+ if {$i == [expr $num/2]} {
+ r lpush mylist $large
+ }
+ r lpush mylist $i
+ incr sum1 $i
+ }
+ assert_encoding $type mylist
+ set sum2 0
+ for {set i 0} {$i < [expr $num/2]} {incr i} {
+ incr sum2 [r lpop mylist]
+ incr sum2 [r rpop mylist]
+ }
+ assert_equal $sum1 $sum2
+ }
+ }
+
+ test {LMPOP with illegal argument} {
+ assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop}
+ assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop 1}
+ assert_error "ERR wrong number of arguments for 'lmpop' command" {r lmpop 1 mylist{t}}
+
+ assert_error "ERR numkeys*" {r lmpop 0 mylist{t} LEFT}
+ assert_error "ERR numkeys*" {r lmpop a mylist{t} LEFT}
+ assert_error "ERR numkeys*" {r lmpop -1 mylist{t} RIGHT}
+
+ assert_error "ERR syntax error*" {r lmpop 1 mylist{t} bad_where}
+ assert_error "ERR syntax error*" {r lmpop 1 mylist{t} LEFT bar_arg}
+ assert_error "ERR syntax error*" {r lmpop 1 mylist{t} RIGHT LEFT}
+ assert_error "ERR syntax error*" {r lmpop 1 mylist{t} COUNT}
+ assert_error "ERR syntax error*" {r lmpop 1 mylist{t} LEFT COUNT 1 COUNT 2}
+ assert_error "ERR syntax error*" {r lmpop 2 mylist{t} mylist2{t} bad_arg}
+
+ assert_error "ERR count*" {r lmpop 1 mylist{t} LEFT COUNT 0}
+ assert_error "ERR count*" {r lmpop 1 mylist{t} RIGHT COUNT a}
+ assert_error "ERR count*" {r lmpop 1 mylist{t} LEFT COUNT -1}
+ assert_error "ERR count*" {r lmpop 2 mylist{t} mylist2{t} RIGHT COUNT -1}
+ }
+
+foreach {type large} [array get largevalue] {
+ test "LMPOP single existing list - $type" {
+ # Same key multiple times.
+ create_$type mylist{t} "a b $large d e f"
+ assert_equal {mylist{t} {a b}} [r lmpop 2 mylist{t} mylist{t} left count 2]
+ assert_equal {mylist{t} {f e}} [r lmpop 2 mylist{t} mylist{t} right count 2]
+ assert_equal 2 [r llen mylist{t}]
+
+ # First one exists, second one does not exist.
+ create_$type mylist{t} "a b $large d e"
+ r del mylist2{t}
+ assert_equal {mylist{t} a} [r lmpop 2 mylist{t} mylist2{t} left count 1]
+ assert_equal 4 [r llen mylist{t}]
+ assert_equal "mylist{t} {e d $large b}" [r lmpop 2 mylist{t} mylist2{t} right count 10]
+ assert_equal {} [r lmpop 2 mylist{t} mylist2{t} right count 1]
+
+ # First one does not exist, second one exists.
+ r del mylist{t}
+ create_$type mylist2{t} "1 2 $large 4 5"
+ assert_equal {mylist2{t} 5} [r lmpop 2 mylist{t} mylist2{t} right count 1]
+ assert_equal 4 [r llen mylist2{t}]
+ assert_equal "mylist2{t} {1 2 $large 4}" [r lmpop 2 mylist{t} mylist2{t} left count 10]
+
+ assert_equal 0 [r exists mylist{t} mylist2{t}]
+ }
+
+ test "LMPOP multiple existing lists - $type" {
+ create_$type mylist{t} "a b $large d e"
+ create_$type mylist2{t} "1 2 $large 4 5"
+
+ # Pop from the first key.
+ assert_equal {mylist{t} {a b}} [r lmpop 2 mylist{t} mylist2{t} left count 2]
+ assert_equal 3 [r llen mylist{t}]
+ assert_equal "mylist{t} {e d $large}" [r lmpop 2 mylist{t} mylist2{t} right count 3]
+ assert_equal 0 [r exists mylist{t}]
+
+ # Pop from the second key.
+ assert_equal "mylist2{t} {1 2 $large}" [r lmpop 2 mylist{t} mylist2{t} left count 3]
+ assert_equal 2 [r llen mylist2{t}]
+ assert_equal {mylist2{t} {5 4}} [r lmpop 2 mylist{t} mylist2{t} right count 2]
+ assert_equal 0 [r exists mylist{t} mylist2{t}]
+
+ # Pop all elements.
+ create_$type mylist{t} "a $large c"
+ create_$type mylist2{t} "1 $large 3"
+ assert_equal "mylist{t} {a $large c}" [r lmpop 2 mylist{t} mylist2{t} left count 10]
+ assert_equal 0 [r llen mylist{t}]
+ assert_equal "mylist2{t} {3 $large 1}" [r lmpop 2 mylist{t} mylist2{t} right count 10]
+ assert_equal 0 [r llen mylist2{t}]
+ assert_equal 0 [r exists mylist{t} mylist2{t}]
+ }
+}
+
+ test {LMPOP propagate as pop with count command to replica} {
+ set repl [attach_to_replication_stream]
+
+ # left/right propagate as lpop/rpop with count
+ r lpush mylist{t} a b c
+
+ # Pop elements from one list.
+ r lmpop 1 mylist{t} left count 1
+ r lmpop 1 mylist{t} right count 1
+
+ # Now the list has only one element.
+ r lmpop 2 mylist{t} mylist2{t} left count 10
+
+ # No elements so we don't propagate.
+ r lmpop 2 mylist{t} mylist2{t} left count 10
+
+ # Pop elements from the second list.
+ r rpush mylist2{t} 1 2 3
+ r lmpop 2 mylist{t} mylist2{t} left count 2
+ r lmpop 2 mylist{t} mylist2{t} right count 1
+
+ # Pop all elements.
+ r rpush mylist{t} a b c
+ r rpush mylist2{t} 1 2 3
+ r lmpop 2 mylist{t} mylist2{t} left count 10
+ r lmpop 2 mylist{t} mylist2{t} right count 10
+
+ assert_replication_stream $repl {
+ {select *}
+ {lpush mylist{t} a b c}
+ {lpop mylist{t} 1}
+ {rpop mylist{t} 1}
+ {lpop mylist{t} 1}
+ {rpush mylist2{t} 1 2 3}
+ {lpop mylist2{t} 2}
+ {rpop mylist2{t} 1}
+ {rpush mylist{t} a b c}
+ {rpush mylist2{t} 1 2 3}
+ {lpop mylist{t} 3}
+ {rpop mylist2{t} 3}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ foreach {type large} [array get largevalue] {
+ test "LRANGE basics - $type" {
+ create_$type mylist "$large 1 2 3 4 5 6 7 8 9"
+ assert_equal {1 2 3 4 5 6 7 8} [r lrange mylist 1 -2]
+ assert_equal {7 8 9} [r lrange mylist -3 -1]
+ assert_equal {4} [r lrange mylist 4 4]
+ }
+
+ test "LRANGE inverted indexes - $type" {
+ create_$type mylist "$large 1 2 3 4 5 6 7 8 9"
+ assert_equal {} [r lrange mylist 6 2]
+ }
+
+ test "LRANGE out of range indexes including the full list - $type" {
+ create_$type mylist "$large 1 2 3"
+ assert_equal "$large 1 2 3" [r lrange mylist -1000 1000]
+ }
+
+ test "LRANGE out of range negative end index - $type" {
+ create_$type mylist "$large 1 2 3"
+ assert_equal $large [r lrange mylist 0 -4]
+ assert_equal {} [r lrange mylist 0 -5]
+ }
+ }
+
+ test {LRANGE against non existing key} {
+ assert_equal {} [r lrange nosuchkey 0 1]
+ }
+
+ test {LRANGE with start > end yields an empty array for backward compatibility} {
+ create_$type mylist "1 $large 3"
+ assert_equal {} [r lrange mylist 1 0]
+ assert_equal {} [r lrange mylist -1 -2]
+ }
+
+ foreach {type large} [array get largevalue] {
+ proc trim_list {type min max} {
+ upvar 1 large large
+ r del mylist
+ create_$type mylist "1 2 3 4 $large"
+ r ltrim mylist $min $max
+ r lrange mylist 0 -1
+ }
+
+ test "LTRIM basics - $type" {
+ assert_equal "1" [trim_list $type 0 0]
+ assert_equal "1 2" [trim_list $type 0 1]
+ assert_equal "1 2 3" [trim_list $type 0 2]
+ assert_equal "2 3" [trim_list $type 1 2]
+ assert_equal "2 3 4 $large" [trim_list $type 1 -1]
+ assert_equal "2 3 4" [trim_list $type 1 -2]
+ assert_equal "4 $large" [trim_list $type -2 -1]
+ assert_equal "$large" [trim_list $type -1 -1]
+ assert_equal "1 2 3 4 $large" [trim_list $type -5 -1]
+ assert_equal "1 2 3 4 $large" [trim_list $type -10 10]
+ assert_equal "1 2 3 4 $large" [trim_list $type 0 5]
+ assert_equal "1 2 3 4 $large" [trim_list $type 0 10]
+ }
+
+ test "LTRIM out of range negative end index - $type" {
+ assert_equal {1} [trim_list $type 0 -5]
+ assert_equal {} [trim_list $type 0 -6]
+ }
+
+ test "LSET - $type" {
+ create_$type mylist "99 98 $large 96 95"
+ r lset mylist 1 foo
+ r lset mylist -1 bar
+ assert_equal "99 foo $large 96 bar" [r lrange mylist 0 -1]
+ }
+
+ test "LSET out of range index - $type" {
+ assert_error ERR*range* {r lset mylist 10 foo}
+ }
+ }
+
+ test {LSET against non existing key} {
+ assert_error ERR*key* {r lset nosuchkey 10 foo}
+ }
+
+ test {LSET against non list value} {
+ r set nolist foobar
+ assert_error WRONGTYPE* {r lset nolist 0 foo}
+ }
+
+ foreach {type e} [array get largevalue] {
+ test "LREM remove all the occurrences - $type" {
+ create_$type mylist "$e foo bar foobar foobared zap bar test foo"
+ assert_equal 2 [r lrem mylist 0 bar]
+ assert_equal "$e foo foobar foobared zap test foo" [r lrange mylist 0 -1]
+ }
+
+ test "LREM remove the first occurrence - $type" {
+ assert_equal 1 [r lrem mylist 1 foo]
+ assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1]
+ }
+
+ test "LREM remove non existing element - $type" {
+ assert_equal 0 [r lrem mylist 1 nosuchelement]
+ assert_equal "$e foobar foobared zap test foo" [r lrange mylist 0 -1]
+ }
+
+ test "LREM starting from tail with negative count - $type" {
+ create_$type mylist "$e foo bar foobar foobared zap bar test foo foo"
+ assert_equal 1 [r lrem mylist -1 bar]
+ assert_equal "$e foo bar foobar foobared zap test foo foo" [r lrange mylist 0 -1]
+ }
+
+ test "LREM starting from tail with negative count (2) - $type" {
+ assert_equal 2 [r lrem mylist -2 foo]
+ assert_equal "$e foo bar foobar foobared zap test" [r lrange mylist 0 -1]
+ }
+
+ test "LREM deleting objects that may be int encoded - $type" {
+ create_$type myotherlist "$e 1 2 3"
+ assert_equal 1 [r lrem myotherlist 1 2]
+ assert_equal 3 [r llen myotherlist]
+ }
+ }
+
+ test "Regression for bug 593 - chaining BRPOPLPUSH with other blocking cmds" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ $rd1 brpoplpush a{t} b{t} 0
+ $rd1 brpoplpush a{t} b{t} 0
+ wait_for_blocked_clients_count 1
+ $rd2 brpoplpush b{t} c{t} 0
+ wait_for_blocked_clients_count 2
+ r lpush a{t} data
+ $rd1 close
+ $rd2 close
+ r ping
+ } {PONG}
+
+ test "BLPOP/BLMOVE should increase dirty" {
+ r del lst{t} lst1{t}
+ set rd [redis_deferring_client]
+
+ set dirty [s rdb_changes_since_last_save]
+ $rd blpop lst{t} 0
+ wait_for_blocked_client
+ r lpush lst{t} a
+ assert_equal {lst{t} a} [$rd read]
+ set dirty2 [s rdb_changes_since_last_save]
+ assert {$dirty2 == $dirty + 2}
+
+ set dirty [s rdb_changes_since_last_save]
+ $rd blmove lst{t} lst1{t} left left 0
+ wait_for_blocked_client
+ r lpush lst{t} a
+ assert_equal {a} [$rd read]
+ set dirty2 [s rdb_changes_since_last_save]
+ assert {$dirty2 == $dirty + 2}
+
+ $rd close
+ }
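+
+ # A minimal non-blocking companion sketch, assuming each of LPUSH and
+ # LPOP bumps rdb_changes_since_last_save by exactly one, mirroring the
+ # accounting the blocked variants above are expected to match.
+ test "Non-blocking LPUSH+LPOP increase dirty by two" {
+ r del lst{t}
+ set dirty [s rdb_changes_since_last_save]
+ r lpush lst{t} a
+ r lpop lst{t}
+ set dirty2 [s rdb_changes_since_last_save]
+ assert {$dirty2 == $dirty + 2}
+ }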
+
+foreach {pop} {BLPOP BLMPOP_RIGHT} {
+ test "client unblock tests" {
+ r del l
+ set rd [redis_deferring_client]
+ $rd client id
+ set id [$rd read]
+
+ # test default args
+ bpop_command $rd $pop l 0
+ wait_for_blocked_client
+ r client unblock $id
+ assert_equal {} [$rd read]
+
+ # test with timeout
+ bpop_command $rd $pop l 0
+ wait_for_blocked_client
+ r client unblock $id TIMEOUT
+ assert_equal {} [$rd read]
+
+ # test with error
+ bpop_command $rd $pop l 0
+ wait_for_blocked_client
+ r client unblock $id ERROR
+ catch {[$rd read]} e
+ assert_equal $e "UNBLOCKED client unblocked via CLIENT UNBLOCK"
+
+ # test with invalid client id
+ catch {[r client unblock asd]} e
+ assert_equal $e "ERR value is not an integer or out of range"
+
+ # test with a non-blocked client (the nested brackets make Tcl try to
+ # run the "0" reply as a command, hence the error asserted below)
+ set myid [r client id]
+ catch {[r client unblock $myid]} e
+ assert_equal $e {invalid command name "0"}
+
+ # finally, verify that this client and the list are still functional
+ bpop_command $rd $pop l 0
+ wait_for_blocked_client
+ r lpush l foo
+ assert_equal {l foo} [$rd read]
+ $rd close
+ }
+}
+
+ foreach {max_lp_size large} "3 $largevalue(listpack) -1 $largevalue(quicklist)" {
+ test "List listpack -> quicklist encoding conversion" {
+ set origin_conf [config_get_set list-max-listpack-size $max_lp_size]
+
+ # RPUSH
+ create_listpack lst "a b c"
+ r RPUSH lst $large
+ assert_encoding quicklist lst
+
+ # LINSERT
+ create_listpack lst "a b c"
+ r LINSERT lst after b $large
+ assert_encoding quicklist lst
+
+ # LSET
+ create_listpack lst "a b c"
+ r LSET lst 0 $large
+ assert_encoding quicklist lst
+
+ # LMOVE
+ create_quicklist lsrc{t} "a b c $large"
+ create_listpack ldes{t} "d e f"
+ r LMOVE lsrc{t} ldes{t} right right
+ assert_encoding quicklist ldes{t}
+
+ r config set list-max-listpack-size $origin_conf
+ }
+ }
+
+ test "List quicklist -> listpack encoding conversion" {
+ set origin_conf [config_get_set list-max-listpack-size 3]
+
+ # RPOP
+ create_quicklist lst "a b c d"
+ r RPOP lst 3
+ assert_encoding listpack lst
+
+ # LREM
+ create_quicklist lst "a a a d"
+ r LREM lst 3 a
+ assert_encoding listpack lst
+
+ # LTRIM
+ create_quicklist lst "a b c d"
+ r LTRIM lst 1 1
+ assert_encoding listpack lst
+
+ r config set list-max-listpack-size -1
+
+ # RPOP
+ create_quicklist lst "a b c $largevalue(quicklist)"
+ r RPOP lst 1
+ assert_encoding listpack lst
+
+ # LREM
+ create_quicklist lst "a $largevalue(quicklist)"
+ r LREM lst 1 $largevalue(quicklist)
+ assert_encoding listpack lst
+
+ # LTRIM
+ create_quicklist lst "a b $largevalue(quicklist)"
+ r LTRIM lst 0 1
+ assert_encoding listpack lst
+
+ # LSET
+ create_quicklist lst "$largevalue(quicklist) a b"
+ r RPOP lst 2
+ assert_encoding quicklist lst
+ r LSET lst -1 c
+ assert_encoding listpack lst
+
+ r config set list-max-listpack-size $origin_conf
+ }
+
+ test "List encoding conversion when RDB loading" {
+ set origin_conf [config_get_set list-max-listpack-size 3]
+ create_listpack lst "a b c"
+
+ # list is still a listpack after DEBUG RELOAD
+ r DEBUG RELOAD
+ assert_encoding listpack lst
+
+ # list is still a quicklist after DEBUG RELOAD
+ r RPUSH lst d
+ r DEBUG RELOAD
+ assert_encoding quicklist lst
+
+ # when a quicklist has only one packed node, it will be
+ # converted to listpack during rdb loading
+ r RPOP lst
+ assert_encoding quicklist lst
+ r DEBUG RELOAD
+ assert_encoding listpack lst
+
+ r config set list-max-listpack-size $origin_conf
+ } {OK} {needs:debug}
+
+ test "List invalid list-max-listpack-size config" {
+ # When list-max-listpack-size is 0 we treat it as 1, and the list will
+ # still be a listpack if there is a single element in it.
+ r config set list-max-listpack-size 0
+ r DEL lst
+ r RPUSH lst a
+ assert_encoding listpack lst
+ r RPUSH lst b
+ assert_encoding quicklist lst
+
+ # When list-max-listpack-size < -5 we treat it as -5.
+ r config set list-max-listpack-size -6
+ r DEL lst
+ r RPUSH lst [string repeat "x" 60000]
+ assert_encoding listpack lst
+ # Converted to quicklist when the size of the listpack exceeds 65536
+ r RPUSH lst [string repeat "x" 5536]
+ assert_encoding quicklist lst
+ }
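+
+ # A minimal sketch of the negative-size semantics, assuming the documented
+ # mapping of -1..-5 to 4KB..64KB per-node byte limits: with -1, a single
+ # value just over 4KB is expected to force a quicklist conversion.
+ test "List listpack 4KB size limit with list-max-listpack-size -1" {
+ set origin_conf [config_get_set list-max-listpack-size -1]
+ r del lst
+ r RPUSH lst [string repeat "x" 5000] ;# exceeds the assumed 4KB limit
+ assert_encoding quicklist lst
+ r config set list-max-listpack-size $origin_conf
+ }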
+
+ test "List of various encodings" {
+ r del k
+ r lpush k 127 ;# ZIP_INT_8B
+ r lpush k 32767 ;# ZIP_INT_16B
+ r lpush k 2147483647 ;# ZIP_INT_32B
+ r lpush k 9223372036854775808 ;# ZIP_INT_64B
+ r lpush k 0 ;# ZIP_INT_IMM_MIN
+ r lpush k 12 ;# ZIP_INT_IMM_MAX
+ r lpush k [string repeat x 31] ;# ZIP_STR_06B
+ r lpush k [string repeat x 8191] ;# ZIP_STR_14B
+ r lpush k [string repeat x 65535] ;# ZIP_STR_32B
+ assert_encoding quicklist k ;# exceeds the size limit of quicklist node
+ set k [r lrange k 0 -1]
+ set dump [r dump k]
+
+ # coverage for objectComputeSize
+ assert_morethan [memory_usage k] 0
+
+ config_set sanitize-dump-payload no mayfail
+ r restore kk 0 $dump replace
+ assert_encoding quicklist kk
+ set kk [r lrange kk 0 -1]
+
+ # try some forward and backward searches to make sure all encodings
+ # can be traversed
+ assert_equal [r lindex kk 5] {9223372036854775808}
+ assert_equal [r lindex kk -5] {0}
+ assert_equal [r lpos kk foo rank 1] {}
+ assert_equal [r lpos kk foo rank -1] {}
+
+ # make sure the values are right
+ assert_equal $k $kk
+ assert_equal [lpop k] [string repeat x 65535]
+ assert_equal [lpop k] [string repeat x 8191]
+ assert_equal [lpop k] [string repeat x 31]
+ set _ $k
+ } {12 0 9223372036854775808 2147483647 32767 127}
+
+ test "List of various encodings - sanitize dump" {
+ config_set sanitize-dump-payload yes mayfail
+ r restore kk 0 $dump replace
+ assert_encoding quicklist kk
+ set k [r lrange k 0 -1]
+ set kk [r lrange kk 0 -1]
+
+ # make sure the values are right
+ assert_equal $k $kk
+ assert_equal [lpop k] [string repeat x 65535]
+ assert_equal [lpop k] [string repeat x 8191]
+ assert_equal [lpop k] [string repeat x 31]
+ set _ $k
+ } {12 0 9223372036854775808 2147483647 32767 127}
+
+ test "Unblock fairness is kept while pipelining" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ # delete the list in case it already exists
+ r del mylist
+
+ # block a client on the list
+ $rd1 BLPOP mylist 0
+ wait_for_blocked_clients_count 1
+
+ # pipeline a list push and a blocking pop on the other client;
+ # we expect fairness to be kept, with $rd1 being
+ # unblocked first
+ set buf ""
+ append buf "LPUSH mylist 1\r\n"
+ append buf "BLPOP mylist 0\r\n"
+ $rd2 write $buf
+ $rd2 flush
+
+ # we check that we still have 1 blocked client
+ # and that the first blocked client has been served
+ assert_equal [$rd1 read] {mylist 1}
+ assert_equal [$rd2 read] {1}
+ wait_for_blocked_clients_count 1
+
+ # We now unblock the last client and verify it was served last
+ r LPUSH mylist 2
+ wait_for_blocked_clients_count 0
+ assert_equal [$rd2 read] {mylist 2}
+
+ $rd1 close
+ $rd2 close
+ }
+
+ test "Unblock fairness is kept during nested unblock" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set rd3 [redis_deferring_client]
+
+ # delete the lists in case they already exist
+ r del l1{t} l2{t} l3{t}
+
+ # block a client on the list
+ $rd1 BRPOPLPUSH l1{t} l3{t} 0
+ wait_for_blocked_clients_count 1
+
+ $rd2 BLPOP l2{t} 0
+ wait_for_blocked_clients_count 2
+
+ $rd3 BLMPOP 0 2 l2{t} l3{t} LEFT COUNT 1
+ wait_for_blocked_clients_count 3
+
+ r multi
+ r lpush l1{t} 1
+ r lpush l2{t} 2
+ r exec
+
+ wait_for_blocked_clients_count 0
+
+ assert_equal [$rd1 read] {1}
+ assert_equal [$rd2 read] {l2{t} 2}
+ assert_equal [$rd3 read] {l3{t} 1}
+
+ $rd1 close
+ $rd2 close
+ $rd3 close
+ }
+
+ test "Blocking command accounted only once in commandstats" {
+ # cleanup first
+ r del mylist
+
+ # create a test client
+ set rd [redis_deferring_client]
+
+ # reset the server stats
+ r config resetstat
+
+ # block a client on the list
+ $rd BLPOP mylist 0
+ wait_for_blocked_clients_count 1
+
+ # unblock the list
+ r LPUSH mylist 1
+ wait_for_blocked_clients_count 0
+
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdrstat blpop r]
+
+ $rd close
+ }
+
+ test "Blocking command accounted only once in commandstats after timeout" {
+ # cleanup first
+ r del mylist
+
+ # create a test client
+ set rd [redis_deferring_client]
+ $rd client id
+ set id [$rd read]
+
+ # reset the server stats
+ r config resetstat
+
+ # block a client on the list
+ $rd BLPOP mylist 0
+ wait_for_blocked_clients_count 1
+
+ # unblock the client on timeout
+ r client unblock $id timeout
+
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=0} [cmdrstat blpop r]
+
+ $rd close
+ }
+
+ test {Command being unblocked cause another command to get unblocked execution order test} {
+ r del src{t} dst{t} key1{t} key2{t} key3{t}
+ set repl [attach_to_replication_stream]
+
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set rd3 [redis_deferring_client]
+
+ $rd1 blmove src{t} dst{t} left right 0
+ wait_for_blocked_clients_count 1
+
+ $rd2 blmove dst{t} src{t} right left 0
+ wait_for_blocked_clients_count 2
+
+ # Create a pipeline of commands that will be processed in one socket read.
+ # Insert two set commands before and after lpush to observe the execution order.
+ set buf ""
+ append buf "set key1{t} value1\r\n"
+ append buf "lpush src{t} dummy\r\n"
+ append buf "set key2{t} value2\r\n"
+ $rd3 write $buf
+ $rd3 flush
+
+ wait_for_blocked_clients_count 0
+
+ r set key3{t} value3
+
+ # If a command being unblocked causes another command to get unblocked, as BLMOVE does,
+ # then the newly unblocked command gets processed right away rather than waiting.
+ # If the SET command were to land between the two LMOVE commands, the results would not be as expected.
+ assert_replication_stream $repl {
+ {select *}
+ {set key1{t} value1}
+ {lpush src{t} dummy}
+ {lmove src{t} dst{t} left right}
+ {lmove dst{t} src{t} right left}
+ {set key2{t} value2}
+ {set key3{t} value3}
+ }
+
+ $rd1 close
+ $rd2 close
+ $rd3 close
+
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+} ;# stop servers
diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl
new file mode 100644
index 0000000..2927562
--- /dev/null
+++ b/tests/unit/type/set.tcl
@@ -0,0 +1,1305 @@
+start_server {
+ tags {"set"}
+ overrides {
+ "set-max-intset-entries" 512
+ "set-max-listpack-entries" 128
+ "set-max-listpack-value" 32
+ }
+} {
+ proc create_set {key entries} {
+ r del $key
+ foreach entry $entries { r sadd $key $entry }
+ }
+
+ # Values for initializing sets, per encoding.
+ array set initelems {listpack {foo} hashtable {foo}}
+ for {set i 0} {$i < 130} {incr i} {
+ lappend initelems(hashtable) [format "i%03d" $i]
+ }
+
+ foreach type {listpack hashtable} {
+ test "SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - $type" {
+ create_set myset $initelems($type)
+ assert_encoding $type myset
+ assert_equal 1 [r sadd myset bar]
+ assert_equal 0 [r sadd myset bar]
+ assert_equal [expr [llength $initelems($type)] + 1] [r scard myset]
+ assert_equal 1 [r sismember myset foo]
+ assert_equal 1 [r sismember myset bar]
+ assert_equal 0 [r sismember myset bla]
+ assert_equal {1} [r smismember myset foo]
+ assert_equal {1 1} [r smismember myset foo bar]
+ assert_equal {1 0} [r smismember myset foo bla]
+ assert_equal {0 1} [r smismember myset bla foo]
+ assert_equal {0} [r smismember myset bla]
+ assert_equal "bar $initelems($type)" [lsort [r smembers myset]]
+ }
+ }
+
+ test {SADD, SCARD, SISMEMBER, SMISMEMBER, SMEMBERS basics - intset} {
+ create_set myset {17}
+ assert_encoding intset myset
+ assert_equal 1 [r sadd myset 16]
+ assert_equal 0 [r sadd myset 16]
+ assert_equal 2 [r scard myset]
+ assert_equal 1 [r sismember myset 16]
+ assert_equal 1 [r sismember myset 17]
+ assert_equal 0 [r sismember myset 18]
+ assert_equal {1} [r smismember myset 16]
+ assert_equal {1 1} [r smismember myset 16 17]
+ assert_equal {1 0} [r smismember myset 16 18]
+ assert_equal {0 1} [r smismember myset 18 16]
+ assert_equal {0} [r smismember myset 18]
+ assert_equal {16 17} [lsort [r smembers myset]]
+ }
+
+ test {SMISMEMBER SMEMBERS SCARD against non set} {
+ r lpush mylist foo
+ assert_error WRONGTYPE* {r smismember mylist bar}
+ assert_error WRONGTYPE* {r smembers mylist}
+ assert_error WRONGTYPE* {r scard mylist}
+ }
+
+ test {SMISMEMBER SMEMBERS SCARD against non existing key} {
+ assert_equal {0} [r smismember myset1 foo]
+ assert_equal {0 0} [r smismember myset1 foo bar]
+ assert_equal {} [r smembers myset1]
+ assert_equal {0} [r scard myset1]
+ }
+
+ test {SMISMEMBER requires one or more members} {
+ r del zmscoretest
+ r zadd zmscoretest 10 x
+ r zadd zmscoretest 20 y
+
+ catch {r smismember zmscoretest} e
+ assert_match {*ERR*wrong*number*arg*} $e
+ }
+
+ test {SADD against non set} {
+ r lpush mylist foo
+ assert_error WRONGTYPE* {r sadd mylist bar}
+ }
+
+ test "SADD a non-integer against a small intset" {
+ create_set myset {1 2 3}
+ assert_encoding intset myset
+ assert_equal 1 [r sadd myset a]
+ assert_encoding listpack myset
+ }
+
+ test "SADD a non-integer against a large intset" {
+ create_set myset {0}
+ for {set i 1} {$i < 130} {incr i} {r sadd myset $i}
+ assert_encoding intset myset
+ assert_equal 1 [r sadd myset a]
+ assert_encoding hashtable myset
+ }
+
+ test "SADD an integer larger than 64 bits" {
+ create_set myset {213244124402402314402033402}
+ assert_encoding listpack myset
+ assert_equal 1 [r sismember myset 213244124402402314402033402]
+ assert_equal {1} [r smismember myset 213244124402402314402033402]
+ }
+
+ test "SADD an integer larger than 64 bits to a large intset" {
+ create_set myset {0}
+ for {set i 1} {$i < 130} {incr i} {r sadd myset $i}
+ assert_encoding intset myset
+ r sadd myset 213244124402402314402033402
+ assert_encoding hashtable myset
+ assert_equal 1 [r sismember myset 213244124402402314402033402]
+ assert_equal {1} [r smismember myset 213244124402402314402033402]
+ }
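+
+ # A minimal companion sketch, assuming an integer that does not fit in 64
+ # bits is handled as a string and therefore converts a small intset to
+ # listpack, just like the non-integer case above.
+ test "SADD an integer larger than 64 bits to a small intset" {
+ create_set myset {1 2 3}
+ assert_encoding intset myset
+ assert_equal 1 [r sadd myset 213244124402402314402033402]
+ assert_encoding listpack myset
+ }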
+
+foreach type {single multiple single_multiple} {
+ test "SADD overflows the maximum allowed integers in an intset - $type" {
+ r del myset
+
+ if {$type == "single"} {
+ # All are single sadd commands.
+ for {set i 0} {$i < 512} {incr i} { r sadd myset $i }
+ } elseif {$type == "multiple"} {
+ # One sadd command to add all elements.
+ set args {}
+ for {set i 0} {$i < 512} {incr i} { lappend args $i }
+ r sadd myset {*}$args
+ } elseif {$type == "single_multiple"} {
+ # First, one SADD adds a single element (creating the key), then another SADD adds all the elements.
+ r sadd myset 1
+ set args {}
+ for {set i 0} {$i < 512} {incr i} { lappend args $i }
+ r sadd myset {*}$args
+ }
+
+ assert_encoding intset myset
+ assert_equal 512 [r scard myset]
+ assert_equal 1 [r sadd myset 512]
+ assert_encoding hashtable myset
+ }
+
+ test "SADD overflows the maximum allowed elements in a listpack - $type" {
+ r del myset
+
+ if {$type == "single"} {
+ # All are single sadd commands.
+ r sadd myset a
+ for {set i 0} {$i < 127} {incr i} { r sadd myset $i }
+ } elseif {$type == "multiple"} {
+ # One sadd command to add all elements.
+ set args {}
+ lappend args a
+ for {set i 0} {$i < 127} {incr i} { lappend args $i }
+ r sadd myset {*}$args
+ } elseif {$type == "single_multiple"} {
+ # First, one SADD adds a single element (creating the key), then another SADD adds all the elements.
+ r sadd myset a
+ set args {}
+ lappend args a
+ for {set i 0} {$i < 127} {incr i} { lappend args $i }
+ r sadd myset {*}$args
+ }
+
+ assert_encoding listpack myset
+ assert_equal 128 [r scard myset]
+ assert_equal 1 [r sadd myset b]
+ assert_encoding hashtable myset
+ }
+}
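+
+ # A minimal sketch for the companion value-length limit, assuming the
+ # set-max-listpack-value of 32 configured in the overrides above: one
+ # member longer than 32 bytes should force a hashtable conversion.
+ test "SADD overflows the maximum allowed listpack value length" {
+ r del myset
+ r sadd myset a
+ assert_encoding listpack myset
+ r sadd myset [string repeat "x" 33]
+ assert_encoding hashtable myset
+ }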
+
+ test {Variadic SADD} {
+ r del myset
+ assert_equal 3 [r sadd myset a b c]
+ assert_equal 2 [r sadd myset A a b c B]
+ assert_equal [lsort {A a b c B}] [lsort [r smembers myset]]
+ }
+
+ test "Set encoding after DEBUG RELOAD" {
+ r del myintset
+ r del myhashset
+ r del mylargeintset
+ r del mysmallset
+ for {set i 0} {$i < 100} {incr i} { r sadd myintset $i }
+ for {set i 0} {$i < 1280} {incr i} { r sadd mylargeintset $i }
+ for {set i 0} {$i < 50} {incr i} { r sadd mysmallset [format "i%03d" $i] }
+ for {set i 0} {$i < 256} {incr i} { r sadd myhashset [format "i%03d" $i] }
+ assert_encoding intset myintset
+ assert_encoding hashtable mylargeintset
+ assert_encoding listpack mysmallset
+ assert_encoding hashtable myhashset
+
+ r debug reload
+ assert_encoding intset myintset
+ assert_encoding hashtable mylargeintset
+ assert_encoding listpack mysmallset
+ assert_encoding hashtable myhashset
+ } {} {needs:debug}
+
+ foreach type {listpack hashtable} {
+ test "SREM basics - $type" {
+ create_set myset $initelems($type)
+ r sadd myset ciao
+ assert_encoding $type myset
+ assert_equal 0 [r srem myset qux]
+ assert_equal 1 [r srem myset ciao]
+ assert_equal $initelems($type) [lsort [r smembers myset]]
+ }
+ }
+
+ test {SREM basics - intset} {
+ create_set myset {3 4 5}
+ assert_encoding intset myset
+ assert_equal 0 [r srem myset 6]
+ assert_equal 1 [r srem myset 4]
+ assert_equal {3 5} [lsort [r smembers myset]]
+ }
+
+ test {SREM with multiple arguments} {
+ r del myset
+ r sadd myset a b c d
+ assert_equal 0 [r srem myset k k k]
+ assert_equal 2 [r srem myset b d x y]
+ lsort [r smembers myset]
+ } {a c}
+
+ test {SREM variadic version with more args needed to destroy the key} {
+ r del myset
+ r sadd myset 1 2 3
+ r srem myset 1 2 3 4 5 6 7 8
+ } {3}
+
+ test "SINTERCARD with illegal arguments" {
+ assert_error "ERR wrong number of arguments for 'sintercard' command" {r sintercard}
+ assert_error "ERR wrong number of arguments for 'sintercard' command" {r sintercard 1}
+
+ assert_error "ERR numkeys*" {r sintercard 0 myset{t}}
+ assert_error "ERR numkeys*" {r sintercard a myset{t}}
+
+ assert_error "ERR Number of keys*" {r sintercard 2 myset{t}}
+ assert_error "ERR Number of keys*" {r sintercard 3 myset{t} myset2{t}}
+
+ assert_error "ERR syntax error*" {r sintercard 1 myset{t} myset2{t}}
+ assert_error "ERR syntax error*" {r sintercard 1 myset{t} bar_arg}
+ assert_error "ERR syntax error*" {r sintercard 1 myset{t} LIMIT}
+
+ assert_error "ERR LIMIT*" {r sintercard 1 myset{t} LIMIT -1}
+ assert_error "ERR LIMIT*" {r sintercard 1 myset{t} LIMIT a}
+ }
+
+ test "SINTERCARD against non-set should throw error" {
+ r del set{t}
+ r sadd set{t} a b c
+ r set key1{t} x
+
+ assert_error "WRONGTYPE*" {r sintercard 1 key1{t}}
+ assert_error "WRONGTYPE*" {r sintercard 2 set{t} key1{t}}
+ assert_error "WRONGTYPE*" {r sintercard 2 key1{t} noset{t}}
+ }
+
+ test "SINTERCARD against non-existing key" {
+ assert_equal 0 [r sintercard 1 non-existing-key]
+ assert_equal 0 [r sintercard 1 non-existing-key limit 0]
+ assert_equal 0 [r sintercard 1 non-existing-key limit 10]
+ }
+
+ foreach {type} {regular intset} {
+ # Create sets setN{t} where N = 1..5
+ if {$type eq "regular"} {
+ set smallenc listpack
+ set bigenc hashtable
+ } else {
+ set smallenc intset
+ set bigenc intset
+ }
+ # Sets 1, 2 and 4 are big; sets 3 and 5 are small.
+ array set encoding "1 $bigenc 2 $bigenc 3 $smallenc 4 $bigenc 5 $smallenc"
+
+ for {set i 1} {$i <= 5} {incr i} {
+ r del [format "set%d{t}" $i]
+ }
+ for {set i 0} {$i < 200} {incr i} {
+ r sadd set1{t} $i
+ r sadd set2{t} [expr $i+195]
+ }
+ foreach i {199 195 1000 2000} {
+ r sadd set3{t} $i
+ }
+ for {set i 5} {$i < 200} {incr i} {
+ r sadd set4{t} $i
+ }
+ r sadd set5{t} 0
+
+ # To make sure the sets are encoded as the type we are testing -- also
+ # when the VM is enabled and the values may be swapped in and out
+ # while the tests are running -- an extra element is added to every
+ # set that determines its encoding.
+ set large 200
+ if {$type eq "regular"} {
+ set large foo
+ }
+
+ for {set i 1} {$i <= 5} {incr i} {
+ r sadd [format "set%d{t}" $i] $large
+ }
+
+ test "Generated sets must be encoded correctly - $type" {
+ for {set i 1} {$i <= 5} {incr i} {
+ assert_encoding $encoding($i) [format "set%d{t}" $i]
+ }
+ }
+
+ test "SINTER with two sets - $type" {
+ assert_equal [list 195 196 197 198 199 $large] [lsort [r sinter set1{t} set2{t}]]
+ }
+
+ test "SINTERCARD with two sets - $type" {
+ assert_equal 6 [r sintercard 2 set1{t} set2{t}]
+ assert_equal 6 [r sintercard 2 set1{t} set2{t} limit 0]
+ assert_equal 3 [r sintercard 2 set1{t} set2{t} limit 3]
+ assert_equal 6 [r sintercard 2 set1{t} set2{t} limit 10]
+ }
+
+ test "SINTERSTORE with two sets - $type" {
+ r sinterstore setres{t} set1{t} set2{t}
+ assert_encoding $smallenc setres{t}
+ assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres{t}]]
+ }
+
+ test "SINTERSTORE with two sets, after a DEBUG RELOAD - $type" {
+ r debug reload
+ r sinterstore setres{t} set1{t} set2{t}
+ assert_encoding $smallenc setres{t}
+ assert_equal [list 195 196 197 198 199 $large] [lsort [r smembers setres{t}]]
+ } {} {needs:debug}
+
+ test "SUNION with two sets - $type" {
+ set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"]
+ assert_equal $expected [lsort [r sunion set1{t} set2{t}]]
+ }
+
+ test "SUNIONSTORE with two sets - $type" {
+ r sunionstore setres{t} set1{t} set2{t}
+ assert_encoding $bigenc setres{t}
+ set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"]
+ assert_equal $expected [lsort [r smembers setres{t}]]
+ }
+
+ test "SINTER against three sets - $type" {
+ assert_equal [list 195 199 $large] [lsort [r sinter set1{t} set2{t} set3{t}]]
+ }
+
+ test "SINTERCARD against three sets - $type" {
+ assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t}]
+ assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t} limit 0]
+ assert_equal 2 [r sintercard 3 set1{t} set2{t} set3{t} limit 2]
+ assert_equal 3 [r sintercard 3 set1{t} set2{t} set3{t} limit 10]
+ }
+
+ test "SINTERSTORE with three sets - $type" {
+ r sinterstore setres{t} set1{t} set2{t} set3{t}
+ assert_equal [list 195 199 $large] [lsort [r smembers setres{t}]]
+ }
+
+ test "SUNION with non existing keys - $type" {
+ set expected [lsort -uniq "[r smembers set1{t}] [r smembers set2{t}]"]
+ assert_equal $expected [lsort [r sunion nokey1{t} set1{t} set2{t} nokey2{t}]]
+ }
+
+ test "SDIFF with two sets - $type" {
+ assert_equal {0 1 2 3 4} [lsort [r sdiff set1{t} set4{t}]]
+ }
+
+ test "SDIFF with three sets - $type" {
+ assert_equal {1 2 3 4} [lsort [r sdiff set1{t} set4{t} set5{t}]]
+ }
+
+ test "SDIFFSTORE with three sets - $type" {
+ r sdiffstore setres{t} set1{t} set4{t} set5{t}
+ # When we start with intsets, we should always end with intsets.
+ if {$type eq {intset}} {
+ assert_encoding intset setres{t}
+ }
+ assert_equal {1 2 3 4} [lsort [r smembers setres{t}]]
+ }
+
+ test "SINTER/SUNION/SDIFF with three same sets - $type" {
+ set expected [lsort "[r smembers set1{t}]"]
+ assert_equal $expected [lsort [r sinter set1{t} set1{t} set1{t}]]
+ assert_equal $expected [lsort [r sunion set1{t} set1{t} set1{t}]]
+ assert_equal {} [lsort [r sdiff set1{t} set1{t} set1{t}]]
+ }
+ }
+
+ test "SINTERSTORE with two listpack sets where result is intset" {
+ r del setres{t} set1{t} set2{t}
+ r sadd set1{t} a b c 1 3 6 x y z
+ r sadd set2{t} e f g 1 2 3 u v w
+ assert_encoding listpack set1{t}
+ assert_encoding listpack set2{t}
+ r sinterstore setres{t} set1{t} set2{t}
+ assert_equal [list 1 3] [lsort [r smembers setres{t}]]
+ assert_encoding intset setres{t}
+ }
+
+ test "SINTERSTORE with two hashtable sets where result is intset" {
+ r del setres{t} set1{t} set2{t}
+ r sadd set1{t} a b c 444 555 666
+ r sadd set2{t} e f g 111 222 333
+ set expected {}
+ for {set i 1} {$i < 130} {incr i} {
+ r sadd set1{t} $i
+ r sadd set2{t} $i
+ lappend expected $i
+ }
+ assert_encoding hashtable set1{t}
+ assert_encoding hashtable set2{t}
+ r sinterstore setres{t} set1{t} set2{t}
+ assert_equal [lsort $expected] [lsort [r smembers setres{t}]]
+ assert_encoding intset setres{t}
+ }
+
+ test "SUNION hashtable and listpack" {
+ # This adds code coverage for adding a non-sds string to a hashtable set
+ # which already contains the string.
+ r del set1{t} set2{t}
+ set union {abcdefghijklmnopqrstuvwxyz1234567890 a b c 1 2 3}
+ create_set set1{t} $union
+ create_set set2{t} {a b c}
+ assert_encoding hashtable set1{t}
+ assert_encoding listpack set2{t}
+ assert_equal [lsort $union] [lsort [r sunion set1{t} set2{t}]]
+ }
+
+ test "SDIFF with first set empty" {
+ r del set1{t} set2{t} set3{t}
+ r sadd set2{t} 1 2 3 4
+ r sadd set3{t} a b c d
+ r sdiff set1{t} set2{t} set3{t}
+ } {}
+
+ test "SDIFF with same set two times" {
+ r del set1
+ r sadd set1 a b c 1 2 3 4 5 6
+ r sdiff set1 set1
+ } {}
+
+ test "SDIFF fuzzing" {
+ for {set j 0} {$j < 100} {incr j} {
+ unset -nocomplain s
+ array set s {}
+ set args {}
+ set num_sets [expr {[randomInt 10]+1}]
+ for {set i 0} {$i < $num_sets} {incr i} {
+ set num_elements [randomInt 100]
+ r del set_$i{t}
+ lappend args set_$i{t}
+ while {$num_elements} {
+ set ele [randomValue]
+ r sadd set_$i{t} $ele
+ if {$i == 0} {
+ set s($ele) x
+ } else {
+ unset -nocomplain s($ele)
+ }
+ incr num_elements -1
+ }
+ }
+ set result [lsort [r sdiff {*}$args]]
+ assert_equal $result [lsort [array names s]]
+ }
+ }
+
+ test "SDIFF against non-set should throw error" {
+ # with an empty set
+ r set key1{t} x
+ assert_error "WRONGTYPE*" {r sdiff key1{t} noset{t}}
+ # different order
+ assert_error "WRONGTYPE*" {r sdiff noset{t} key1{t}}
+
+ # with a legal set
+ r del set1{t}
+ r sadd set1{t} a b c
+ assert_error "WRONGTYPE*" {r sdiff key1{t} set1{t}}
+ # different order
+ assert_error "WRONGTYPE*" {r sdiff set1{t} key1{t}}
+ }
+
+ test "SDIFF should handle non existing key as empty" {
+ r del set1{t} set2{t} set3{t}
+
+ r sadd set1{t} a b c
+ r sadd set2{t} b c d
+ assert_equal {a} [lsort [r sdiff set1{t} set2{t} set3{t}]]
+ assert_equal {} [lsort [r sdiff set3{t} set2{t} set1{t}]]
+ }
+
+ test "SDIFFSTORE against non-set should throw error" {
+ r del set1{t} set2{t} set3{t} key1{t}
+ r set key1{t} x
+
+ # with an empty dstkey
+ assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} noset{t}}
+ assert_equal 0 [r exists set3{t}]
+ assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} noset{t} key1{t}}
+ assert_equal 0 [r exists set3{t}]
+
+ # with a legal dstkey
+ r sadd set1{t} a b c
+ r sadd set2{t} b c d
+ r sadd set3{t} e
+ assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} key1{t} set1{t} noset{t}}
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {e} [lsort [r smembers set3{t}]]
+
+ assert_error "WRONGTYPE*" {r SDIFFSTORE set3{t} set1{t} key1{t} set2{t}}
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {e} [lsort [r smembers set3{t}]]
+ }
+
+ test "SDIFFSTORE should handle non existing key as empty" {
+ r del set1{t} set2{t} set3{t}
+
+ r set setres{t} xxx
+ assert_equal 0 [r sdiffstore setres{t} foo111{t} bar222{t}]
+ assert_equal 0 [r exists setres{t}]
+
+ # with a legal dstkey, should delete dstkey
+ r sadd set3{t} a b c
+ assert_equal 0 [r sdiffstore set3{t} set1{t} set2{t}]
+ assert_equal 0 [r exists set3{t}]
+
+ r sadd set1{t} a b c
+ assert_equal 3 [r sdiffstore set3{t} set1{t} set2{t}]
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {a b c} [lsort [r smembers set3{t}]]
+
+ # with a legal dstkey and empty set2, should delete the dstkey
+ r sadd set3{t} a b c
+ assert_equal 0 [r sdiffstore set3{t} set2{t} set1{t}]
+ assert_equal 0 [r exists set3{t}]
+ }
+
+ test "SINTER against non-set should throw error" {
+ r set key1{t} x
+ assert_error "WRONGTYPE*" {r sinter key1{t} noset{t}}
+ # different order
+ assert_error "WRONGTYPE*" {r sinter noset{t} key1{t}}
+
+ r sadd set1{t} a b c
+ assert_error "WRONGTYPE*" {r sinter key1{t} set1{t}}
+ # different order
+ assert_error "WRONGTYPE*" {r sinter set1{t} key1{t}}
+ }
+
+ test "SINTER should handle non existing key as empty" {
+ r del set1{t} set2{t} set3{t}
+ r sadd set1{t} a b c
+ r sadd set2{t} b c d
+ r sinter set1{t} set2{t} set3{t}
+ } {}
+
+ test "SINTER with same integer elements but different encoding" {
+ r del set1{t} set2{t}
+ r sadd set1{t} 1 2 3
+ r sadd set2{t} 1 2 3 a
+ r srem set2{t} a
+ assert_encoding intset set1{t}
+ assert_encoding listpack set2{t}
+ lsort [r sinter set1{t} set2{t}]
+ } {1 2 3}
+
+ test "SINTERSTORE against non-set should throw error" {
+ r del set1{t} set2{t} set3{t} key1{t}
+ r set key1{t} x
+
+ # with an empty dstkey
+ assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} noset{t}}
+ assert_equal 0 [r exists set3{t}]
+ assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t}}
+ assert_equal 0 [r exists set3{t}]
+
+ # with a legal dstkey
+ r sadd set1{t} a b c
+ r sadd set2{t} b c d
+ r sadd set3{t} e
+ assert_error "WRONGTYPE*" {r sinterstore set3{t} key1{t} set2{t} noset{t}}
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {e} [lsort [r smembers set3{t}]]
+
+ assert_error "WRONGTYPE*" {r sinterstore set3{t} noset{t} key1{t} set2{t}}
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {e} [lsort [r smembers set3{t}]]
+ }
+
+ test "SINTERSTORE against non existing keys should delete dstkey" {
+ r del set1{t} set2{t} set3{t}
+
+ r set setres{t} xxx
+ assert_equal 0 [r sinterstore setres{t} foo111{t} bar222{t}]
+ assert_equal 0 [r exists setres{t}]
+
+ # with a legal dstkey
+ r sadd set3{t} a b c
+ assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}]
+ assert_equal 0 [r exists set3{t}]
+
+ r sadd set1{t} a b c
+ assert_equal 0 [r sinterstore set3{t} set1{t} set2{t}]
+ assert_equal 0 [r exists set3{t}]
+
+ assert_equal 0 [r sinterstore set3{t} set2{t} set1{t}]
+ assert_equal 0 [r exists set3{t}]
+ }
+
+ test "SUNION against non-set should throw error" {
+ r set key1{t} x
+ assert_error "WRONGTYPE*" {r sunion key1{t} noset{t}}
+ # different order
+ assert_error "WRONGTYPE*" {r sunion noset{t} key1{t}}
+
+ r del set1{t}
+ r sadd set1{t} a b c
+ assert_error "WRONGTYPE*" {r sunion key1{t} set1{t}}
+ # different order
+ assert_error "WRONGTYPE*" {r sunion set1{t} key1{t}}
+ }
+
+ test "SUNION should handle non existing key as empty" {
+ r del set1{t} set2{t} set3{t}
+
+ r sadd set1{t} a b c
+ r sadd set2{t} b c d
+ assert_equal {a b c d} [lsort [r sunion set1{t} set2{t} set3{t}]]
+ }
+
+ test "SUNIONSTORE against non-set should throw error" {
+ r del set1{t} set2{t} set3{t} key1{t}
+ r set key1{t} x
+
+ # with an empty dstkey
+ assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} noset{t}}
+ assert_equal 0 [r exists set3{t}]
+ assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t}}
+ assert_equal 0 [r exists set3{t}]
+
+ # with a legal dstkey
+ r sadd set1{t} a b c
+ r sadd set2{t} b c d
+ r sadd set3{t} e
+ assert_error "WRONGTYPE*" {r sunionstore set3{t} key1{t} key2{t} noset{t}}
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {e} [lsort [r smembers set3{t}]]
+
+ assert_error "WRONGTYPE*" {r sunionstore set3{t} noset{t} key1{t} key2{t}}
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {e} [lsort [r smembers set3{t}]]
+ }
+
+ test "SUNIONSTORE should handle non existing key as empty" {
+ r del set1{t} set2{t} set3{t}
+
+ r set setres{t} xxx
+ assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}]
+ assert_equal 0 [r exists setres{t}]
+
+ # set1 set2 both empty, should delete the dstkey
+ r sadd set3{t} a b c
+ assert_equal 0 [r sunionstore set3{t} set1{t} set2{t}]
+ assert_equal 0 [r exists set3{t}]
+
+ r sadd set1{t} a b c
+ r sadd set3{t} e f
+ assert_equal 3 [r sunionstore set3{t} set1{t} set2{t}]
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {a b c} [lsort [r smembers set3{t}]]
+
+ r sadd set3{t} d
+ assert_equal 3 [r sunionstore set3{t} set2{t} set1{t}]
+ assert_equal 1 [r exists set3{t}]
+ assert_equal {a b c} [lsort [r smembers set3{t}]]
+ }
+
+ test "SUNIONSTORE against non existing keys should delete dstkey" {
+ r set setres{t} xxx
+ assert_equal 0 [r sunionstore setres{t} foo111{t} bar222{t}]
+ assert_equal 0 [r exists setres{t}]
+ }
+
+ foreach {type contents} {listpack {a b c} intset {1 2 3}} {
+ test "SPOP basics - $type" {
+ create_set myset $contents
+ assert_encoding $type myset
+ assert_equal $contents [lsort [list [r spop myset] [r spop myset] [r spop myset]]]
+ assert_equal 0 [r scard myset]
+ }
+
+ test "SPOP with <count>=1 - $type" {
+ create_set myset $contents
+ assert_encoding $type myset
+ assert_equal $contents [lsort [list [r spop myset 1] [r spop myset 1] [r spop myset 1]]]
+ assert_equal 0 [r scard myset]
+ }
+
+ test "SRANDMEMBER - $type" {
+ create_set myset $contents
+ unset -nocomplain myset
+ array set myset {}
+ for {set i 0} {$i < 100} {incr i} {
+ set myset([r srandmember myset]) 1
+ }
+ assert_equal $contents [lsort [array names myset]]
+ }
+ }
+
+ test "SPOP integer from listpack set" {
+ create_set myset {a 1 2 3 4 5 6 7}
+ assert_encoding listpack myset
+ set a [r spop myset]
+ set b [r spop myset]
+ assert {[string is digit $a] || [string is digit $b]}
+ }
+
+ foreach {type contents} {
+ listpack {a b c d e f g h i j k l m n o p q r s t u v w x y z}
+ intset {1 10 11 12 13 14 15 16 17 18 19 2 20 21 22 23 24 25 26 3 4 5 6 7 8 9}
+ hashtable {ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 b c d e f g h i j k l m n o p q r s t u v w x y z}
+ } {
+ test "SPOP with <count> - $type" {
+ create_set myset $contents
+ assert_encoding $type myset
+ assert_equal $contents [lsort [concat [r spop myset 11] [r spop myset 9] [r spop myset 0] [r spop myset 4] [r spop myset 1] [r spop myset 0] [r spop myset 1] [r spop myset 0]]]
+ assert_equal 0 [r scard myset]
+ }
+ }
+
+ # As seen in intsetRandomMembers
+ test "SPOP using integers, testing Knuth's and Floyd's algorithm" {
+ create_set myset {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
+ assert_encoding intset myset
+ assert_equal 20 [r scard myset]
+ r spop myset 1
+ assert_equal 19 [r scard myset]
+ r spop myset 2
+ assert_equal 17 [r scard myset]
+ r spop myset 3
+ assert_equal 14 [r scard myset]
+ r spop myset 10
+ assert_equal 4 [r scard myset]
+ r spop myset 10
+ assert_equal 0 [r scard myset]
+ r spop myset 1
+ assert_equal 0 [r scard myset]
+ } {}
+
+ test "SPOP using integers with Knuth's algorithm" {
+ r spop nonexisting_key 100
+ } {}
+
+ foreach {type content} {
+ intset {1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
+ listpack {a 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20}
+ } {
+ test "SPOP new implementation: code path #1 $type" {
+ create_set myset $content
+ assert_encoding $type myset
+ set res [r spop myset 30]
+ assert {[lsort $content] eq [lsort $res]}
+ assert_equal {0} [r exists myset]
+ }
+
+ test "SPOP new implementation: code path #2 $type" {
+ create_set myset $content
+ assert_encoding $type myset
+ set res [r spop myset 2]
+ assert {[llength $res] == 2}
+ assert {[r scard myset] == 18}
+ set union [concat [r smembers myset] $res]
+ assert {[lsort $union] eq [lsort $content]}
+ }
+
+ test "SPOP new implementation: code path #3 $type" {
+ create_set myset $content
+ assert_encoding $type myset
+ set res [r spop myset 18]
+ assert {[llength $res] == 18}
+ assert {[r scard myset] == 2}
+ set union [concat [r smembers myset] $res]
+ assert {[lsort $union] eq [lsort $content]}
+ }
+ }
+
+ test "SPOP new implementation: code path #1 propagate as DEL or UNLINK" {
+ r del myset1{t} myset2{t}
+ r sadd myset1{t} 1 2 3 4 5
+ r sadd myset2{t} 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65
+
+ set repl [attach_to_replication_stream]
+
+ r config set lazyfree-lazy-server-del no
+ r spop myset1{t} [r scard myset1{t}]
+ r config set lazyfree-lazy-server-del yes
+ r spop myset2{t} [r scard myset2{t}]
+ assert_equal {0} [r exists myset1{t} myset2{t}]
+
+ # Verify the propagate of DEL and UNLINK.
+ assert_replication_stream $repl {
+ {select *}
+ {del myset1{t}}
+ {unlink myset2{t}}
+ }
+
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test "SRANDMEMBER count of 0 is handled correctly" {
+ r srandmember myset 0
+ } {}
+
+ test "SRANDMEMBER with <count> against non existing key" {
+ r srandmember nonexisting_key 100
+ } {}
+
+ test "SRANDMEMBER count overflow" {
+ r sadd myset a
+ assert_error {*value is out of range*} {r srandmember myset -9223372036854775808}
+ } {}
+
+ # Make sure we can distinguish between an empty array and a null response
+ r readraw 1
+
+ test "SRANDMEMBER count of 0 is handled correctly - emptyarray" {
+ r srandmember myset 0
+ } {*0}
+
+ test "SRANDMEMBER with <count> against non existing key - emptyarray" {
+ r srandmember nonexisting_key 100
+ } {*0}
+
+ r readraw 0
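+
+ # In RESP terms (a sketch of what readraw exposes): the first raw line of
+ # an empty array reply is "*0", whereas a null array reply would be "*-1",
+ # so the raw expectations above confirm empty arrays rather than nulls.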
+
+ foreach {type contents} {
+ listpack {
+ 1 5 10 50 125 50000 33959417 4775547 65434162
+ 12098459 427716 483706 2726473884 72615637475
+ MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA
+ SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN
+ SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH
+ KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA
+ BRENDA AMY ANNA REBECCA VIRGINIA KATHLEEN
+ }
+ intset {
+ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
+ 20 21 22 23 24 25 26 27 28 29
+ 30 31 32 33 34 35 36 37 38 39
+ 40 41 42 43 44 45 46 47 48 49
+ }
+ hashtable {
+ ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+ 1 5 10 50 125 50000 33959417 4775547 65434162
+ 12098459 427716 483706 2726473884 72615637475
+ MARY PATRICIA LINDA BARBARA ELIZABETH JENNIFER MARIA
+ SUSAN MARGARET DOROTHY LISA NANCY KAREN BETTY HELEN
+ SANDRA DONNA CAROL RUTH SHARON MICHELLE LAURA SARAH
+ KIMBERLY DEBORAH JESSICA SHIRLEY CYNTHIA ANGELA MELISSA
+ BRENDA AMY ANNA REBECCA VIRGINIA
+ }
+ } {
+ test "SRANDMEMBER with <count> - $type" {
+ create_set myset $contents
+ assert_encoding $type myset
+ unset -nocomplain myset
+ array set myset {}
+ foreach ele [r smembers myset] {
+ set myset($ele) 1
+ }
+ assert_equal [lsort $contents] [lsort [array names myset]]
+
+ # Make sure that a count of 0 is handled correctly.
+ assert_equal [r srandmember myset 0] {}
+
+ # We'll stress different parts of the code, see the implementation
+ # of SRANDMEMBER for more information, but basically there are
+ # four different code paths.
+ #
+ # PATH 1: Use negative count.
+ #
+ # 1) Check that it returns repeated elements.
+ set res [r srandmember myset -100]
+ assert_equal [llength $res] 100
+
+ # 2) Check that all the elements actually belong to the
+ # original set.
+ foreach ele $res {
+ assert {[info exists myset($ele)]}
+ }
+
+ # 3) Check that eventually all the elements are returned.
+ unset -nocomplain auxset
+ set iterations 1000
+ while {$iterations != 0} {
+ incr iterations -1
+ set res [r srandmember myset -10]
+ foreach ele $res {
+ set auxset($ele) 1
+ }
+ if {[lsort [array names myset]] eq
+ [lsort [array names auxset]]} {
+ break;
+ }
+ }
+ assert {$iterations != 0}
+
+ # PATH 2: positive count (unique behavior) with requested size
+ # equal to or greater than the set size.
+ foreach size {50 100} {
+ set res [r srandmember myset $size]
+ assert_equal [llength $res] 50
+ assert_equal [lsort $res] [lsort [array names myset]]
+ }
+
+ # PATH 3: Ask for almost as many elements as there are in the set.
+ # In this case the implementation will duplicate the original
+ # set and will remove random elements up to the requested size.
+ #
+ # PATH 4: Ask a number of elements definitely smaller than
+ # the set size.
+ #
+ # We can test both code paths with the same code, just by
+ # changing the requested size.
+
+ foreach size {45 5} {
+ set res [r srandmember myset $size]
+ assert_equal [llength $res] $size
+
+ # 1) Check that all the elements actually belong to the
+ # original set.
+ foreach ele $res {
+ assert {[info exists myset($ele)]}
+ }
+
+ # 2) Check that eventually all the elements are returned.
+ unset -nocomplain auxset
+ set iterations 1000
+ while {$iterations != 0} {
+ incr iterations -1
+ set res [r srandmember myset $size]
+ foreach ele $res {
+ set auxset($ele) 1
+ }
+ if {[lsort [array names myset]] eq
+ [lsort [array names auxset]]} {
+ break;
+ }
+ }
+ assert {$iterations != 0}
+ }
+ }
+ }
+
+ foreach {type contents} {
+ listpack {
+ 1 5 10 50 125
+ MARY PATRICIA LINDA BARBARA ELIZABETH
+ }
+ intset {
+ 0 1 2 3 4 5 6 7 8 9
+ }
+ hashtable {
+ ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+ 1 5 10 50 125
+ MARY PATRICIA LINDA BARBARA
+ }
+ } {
+ test "SRANDMEMBER histogram distribution - $type" {
+ create_set myset $contents
+ assert_encoding $type myset
+ unset -nocomplain myset
+ array set myset {}
+ foreach ele [r smembers myset] {
+ set myset($ele) 1
+ }
+
+ # Use negative count (PATH 1).
+ # df = 9, 40 means 0.00001 probability
+ set res [r srandmember myset -1000]
+ assert_lessthan [chi_square_value $res] 40
+
+ # Use positive count (both PATH 3 and PATH 4).
+ foreach size {8 2} {
+ unset -nocomplain allkey
+ set iterations [expr {1000 / $size}]
+ while {$iterations != 0} {
+ incr iterations -1
+ set res [r srandmember myset $size]
+ foreach ele $res {
+ lappend allkey $ele
+ }
+ }
+ # df = 9, 40 means 0.00001 probability
+ assert_lessthan [chi_square_value $allkey] 40
+ }
+ }
+ }
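+
+ # For reference, a minimal sketch of the statistic the chi_square_value
+ # helper is assumed to compute: sum((observed - expected)^2 / expected)
+ # with expected = total / distinct, so 10 distinct members give df = 9.
+ proc chi_square_value_sketch {res} {
+ unset -nocomplain counts
+ foreach ele $res { incr counts($ele) }
+ set expected [expr {double([llength $res]) / [array size counts]}]
+ set chi 0
+ foreach {ele count} [array get counts] {
+ set chi [expr {$chi + pow($count - $expected, 2) / $expected}]
+ }
+ return $chi
+ }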
+
+ proc is_rehashing {myset} {
+ set htstats [r debug HTSTATS-KEY $myset]
+ return [string match {*rehashing target*} $htstats]
+ }
+
+ proc rem_hash_set_top_N {myset n} {
+ set cursor 0
+ set members {}
+ set enough 0
+ while 1 {
+ set res [r sscan $myset $cursor]
+ set cursor [lindex $res 0]
+ set k [lindex $res 1]
+ foreach m $k {
+ lappend members $m
+ if {[llength $members] >= $n} {
+ set enough 1
+ break
+ }
+ }
+ if {$enough || $cursor == 0} {
+ break
+ }
+ }
+ r srem $myset {*}$members
+ }
+
+ test "SRANDMEMBER with a dict containing long chain" {
+ set origin_save [config_get_set save ""]
+ set origin_max_lp [config_get_set set-max-listpack-entries 0]
+ set origin_save_delay [config_get_set rdb-key-save-delay 2147483647]
+
+ # 1) Create a hash set with 100000 members.
+ set members {}
+ for {set i 0} {$i < 100000} {incr i} {
+ lappend members [format "m:%d" $i]
+ }
+ create_set myset $members
+
+ # 2) Wait for the hash set rehashing to finish.
+ while {[is_rehashing myset]} {
+ r srandmember myset 100
+ }
+
+ # 3) Turn off rehashing for this set and remove members until 500 remain.
+ r bgsave
+ rem_hash_set_top_N myset [expr {[r scard myset] - 500}]
+ assert_equal [r scard myset] 500
+
+ # 4) Kill RDB child process to restart rehashing.
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+ waitForBgsave r
+
+ # 5) Let the set's hash table start rehashing
+ r spop myset 1
+ assert [is_rehashing myset]
+
+ # 6) Verify that rehashing is still performed while an RDB save is in progress
+ # (because the ratio is extreme), by waiting for it to finish during an active bgsave.
+ r bgsave
+
+ while {[is_rehashing myset]} {
+ r srandmember myset 1
+ }
+ if {$::verbose} {
+ puts [r debug HTSTATS-KEY myset full]
+ }
+
+ set pid1 [get_child_pid 0]
+ catch {exec kill -9 $pid1}
+ waitForBgsave r
+
+ # 7) Check that eventually, SRANDMEMBER returns all elements.
+ array set allmyset {}
+ foreach ele [r smembers myset] {
+ set allmyset($ele) 1
+ }
+ unset -nocomplain auxset
+ set iterations 1000
+ while {$iterations != 0} {
+ incr iterations -1
+ set res [r srandmember myset -10]
+ foreach ele $res {
+ set auxset($ele) 1
+ }
+ if {[lsort [array names allmyset]] eq
+ [lsort [array names auxset]]} {
+ break;
+ }
+ }
+ assert {$iterations != 0}
+
+ # 8) Reduce the set to 30 members in order to compute the Chi-Square Distribution value,
+ # otherwise we would need more iterations.
+ rem_hash_set_top_N myset [expr {[r scard myset] - 30}]
+ assert_equal [r scard myset] 30
+ assert {[is_rehashing myset]}
+
+ # Now we have a hash table with only one long-chain bucket.
+ set htstats [r debug HTSTATS-KEY myset full]
+ assert {[regexp {different slots: ([0-9]+)} $htstats - different_slots]}
+ assert {[regexp {max chain length: ([0-9]+)} $htstats - max_chain_length]}
+ assert {$different_slots == 1 && $max_chain_length == 30}
+
+ # 9) Use positive count (PATH 4) to get 10 elements (out of 30) each time.
+ unset -nocomplain allkey
+ set iterations 1000
+ while {$iterations != 0} {
+ incr iterations -1
+ set res [r srandmember myset 10]
+ foreach ele $res {
+ lappend allkey $ele
+ }
+ }
+ # validate even distribution of random sampling (df = 29, 73 means 0.00001 probability)
+ assert_lessthan [chi_square_value $allkey] 73
+
+ r config set save $origin_save
+ r config set set-max-listpack-entries $origin_max_lp
+ r config set rdb-key-save-delay $origin_save_delay
+ } {OK} {needs:debug slow}
+
+ proc setup_move {} {
+ r del myset3{t} myset4{t}
+ create_set myset1{t} {1 a b}
+ create_set myset2{t} {2 3 4}
+ assert_encoding listpack myset1{t}
+ assert_encoding intset myset2{t}
+ }
+
+ test "SMOVE basics - from regular set to intset" {
+ # moving a non-integer element to an intset should convert the encoding
+ setup_move
+ assert_equal 1 [r smove myset1{t} myset2{t} a]
+ assert_equal {1 b} [lsort [r smembers myset1{t}]]
+ assert_equal {2 3 4 a} [lsort [r smembers myset2{t}]]
+ assert_encoding listpack myset2{t}
+
+ # moving an integer element should not convert the encoding
+ setup_move
+ assert_equal 1 [r smove myset1{t} myset2{t} 1]
+ assert_equal {a b} [lsort [r smembers myset1{t}]]
+ assert_equal {1 2 3 4} [lsort [r smembers myset2{t}]]
+ assert_encoding intset myset2{t}
+ }
+
+ test "SMOVE basics - from intset to regular set" {
+ setup_move
+ assert_equal 1 [r smove myset2{t} myset1{t} 2]
+ assert_equal {1 2 a b} [lsort [r smembers myset1{t}]]
+ assert_equal {3 4} [lsort [r smembers myset2{t}]]
+ }
+
+ test "SMOVE non existing key" {
+ setup_move
+ assert_equal 0 [r smove myset1{t} myset2{t} foo]
+ assert_equal 0 [r smove myset1{t} myset1{t} foo]
+ assert_equal {1 a b} [lsort [r smembers myset1{t}]]
+ assert_equal {2 3 4} [lsort [r smembers myset2{t}]]
+ }
+
+ test "SMOVE non existing src set" {
+ setup_move
+ assert_equal 0 [r smove noset{t} myset2{t} foo]
+ assert_equal {2 3 4} [lsort [r smembers myset2{t}]]
+ }
+
+ test "SMOVE from regular set to non existing destination set" {
+ setup_move
+ assert_equal 1 [r smove myset1{t} myset3{t} a]
+ assert_equal {1 b} [lsort [r smembers myset1{t}]]
+ assert_equal {a} [lsort [r smembers myset3{t}]]
+ assert_encoding listpack myset3{t}
+ }
+
+ test "SMOVE from intset to non existing destination set" {
+ setup_move
+ assert_equal 1 [r smove myset2{t} myset3{t} 2]
+ assert_equal {3 4} [lsort [r smembers myset2{t}]]
+ assert_equal {2} [lsort [r smembers myset3{t}]]
+ assert_encoding intset myset3{t}
+ }
+
+ test "SMOVE wrong src key type" {
+ r set x{t} 10
+ assert_error "WRONGTYPE*" {r smove x{t} myset2{t} foo}
+ }
+
+ test "SMOVE wrong dst key type" {
+ r set x{t} 10
+ assert_error "WRONGTYPE*" {r smove myset2{t} x{t} foo}
+ }
+
+ test "SMOVE with identical source and destination" {
+ r del set{t}
+ r sadd set{t} a b c
+ r smove set{t} set{t} b
+ lsort [r smembers set{t}]
+ } {a b c}
+
+ test "SMOVE only notify dstset when the addition is successful" {
+ r del srcset{t}
+ r del dstset{t}
+
+ r sadd srcset{t} a b
+ r sadd dstset{t} a
+
+ r watch dstset{t}
+
+ r multi
+ r sadd dstset{t} c
+
+ set r2 [redis_client]
+ $r2 smove srcset{t} dstset{t} a
+
+ # The dstset is actually unchanged, so the MULTI should succeed
+ r exec
+ set res [r scard dstset{t}]
+ assert_equal $res 2
+ $r2 close
+ }
+
+ tags {slow} {
+ test {intsets implementation stress testing} {
+ for {set j 0} {$j < 20} {incr j} {
+ unset -nocomplain s
+ array set s {}
+ r del s
+ set len [randomInt 1024]
+ for {set i 0} {$i < $len} {incr i} {
+ randpath {
+ set data [randomInt 65536]
+ } {
+ set data [randomInt 4294967296]
+ } {
+ set data [randomInt 18446744073709551616]
+ }
+ set s($data) {}
+ r sadd s $data
+ }
+ assert_equal [lsort [r smembers s]] [lsort [array names s]]
+ set len [array size s]
+ for {set i 0} {$i < $len} {incr i} {
+ set e [r spop s]
+ if {![info exists s($e)]} {
+ puts "Can't find '$e' on local array"
+ puts "Local array: [lsort [r smembers s]]"
+ puts "Remote array: [lsort [array names s]]"
+ error "exception"
+ }
+ array unset s $e
+ }
+ assert_equal [r scard s] 0
+ assert_equal [array size s] 0
+ }
+ }
+ }
+}
+
+run_solo {set-large-memory} {
+start_server [list overrides [list save ""] ] {
+
+# test if the server supports such large configs (avoid 32 bit builds)
+catch {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+}
+if {[lindex [r config get proto-max-bulk-len] 1] == 10000000000} {
+
+ set str_length 4400000000 ;#~4.4GB
+
+ test {SADD, SCARD, SISMEMBER - large data} {
+ r flushdb
+ r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n"
+ assert_equal 1 [write_big_bulk $str_length "aaa"]
+ r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n"
+ assert_equal 1 [write_big_bulk $str_length "bbb"]
+ r write "*3\r\n\$4\r\nSADD\r\n\$5\r\nmyset\r\n"
+ assert_equal 0 [write_big_bulk $str_length "aaa"]
+ assert_encoding hashtable myset
+ set s0 [s used_memory]
+ assert {$s0 > [expr $str_length * 2]}
+ assert_equal 2 [r scard myset]
+
+ r write "*3\r\n\$9\r\nSISMEMBER\r\n\$5\r\nmyset\r\n"
+ assert_equal 1 [write_big_bulk $str_length "aaa"]
+ r write "*3\r\n\$9\r\nSISMEMBER\r\n\$5\r\nmyset\r\n"
+ assert_equal 0 [write_big_bulk $str_length "ccc"]
+ r write "*3\r\n\$4\r\nSREM\r\n\$5\r\nmyset\r\n"
+ assert_equal 1 [write_big_bulk $str_length "bbb"]
+ assert_equal [read_big_bulk {r spop myset} yes "aaa"] $str_length
+ } {} {large-memory}
+
+ # restore defaults
+ r config set proto-max-bulk-len 536870912
+ r config set client-query-buffer-limit 1073741824
+
+} ;# skip 32bit builds
+}
+} ;# run_solo
diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl
new file mode 100644
index 0000000..a6cc5da
--- /dev/null
+++ b/tests/unit/type/stream-cgroups.tcl
@@ -0,0 +1,1297 @@
+start_server {
+ tags {"stream"}
+} {
+ test {XGROUP CREATE: creation and duplicate group name detection} {
+ r DEL mystream
+ r XADD mystream * foo bar
+ r XGROUP CREATE mystream mygroup $
+ catch {r XGROUP CREATE mystream mygroup $} err
+ set err
+ } {BUSYGROUP*}
+
+ test {XGROUP CREATE: with ENTRIESREAD parameter} {
+ r DEL mystream
+ r XADD mystream 1-1 a 1
+ r XADD mystream 1-2 b 2
+ r XADD mystream 1-3 c 3
+ r XADD mystream 1-4 d 4
+ assert_error "*value for ENTRIESREAD must be positive or -1*" {r XGROUP CREATE mystream mygroup $ ENTRIESREAD -3}
+
+ r XGROUP CREATE mystream mygroup1 $ ENTRIESREAD 0
+ r XGROUP CREATE mystream mygroup2 $ ENTRIESREAD 3
+
+ set reply [r xinfo groups mystream]
+ foreach group_info $reply {
+ set group_name [dict get $group_info name]
+ set entries_read [dict get $group_info entries-read]
+ if {$group_name == "mygroup1"} {
+ assert_equal $entries_read 0
+ } else {
+ assert_equal $entries_read 3
+ }
+ }
+ }
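+
+ # A minimal follow-up sketch, assuming the lag reported by XINFO GROUPS is
+ # derived as the stream's entries-added minus the group's entries-read
+ # (4 entries were added above):
+ test {XGROUP CREATE: ENTRIESREAD determines the reported lag} {
+ foreach group_info [r xinfo groups mystream] {
+ set group_name [dict get $group_info name]
+ set lag [dict get $group_info lag]
+ if {$group_name == "mygroup1"} {
+ assert_equal $lag 4
+ } else {
+ assert_equal $lag 1
+ }
+ }
+ }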
+
+ test {XGROUP CREATE: automatic stream creation fails without MKSTREAM} {
+ r DEL mystream
+ catch {r XGROUP CREATE mystream mygroup $} err
+ set err
+ } {ERR*}
+
+ test {XGROUP CREATE: automatic stream creation works with MKSTREAM} {
+ r DEL mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ } {OK}
+
+ test {XREADGROUP will return only new elements} {
+ r XADD mystream * a 1
+ r XADD mystream * b 2
+ # XREADGROUP should return only the new elements "a 1" "b 2"
+ # and not the element "foo bar" which was pre-existing in the
+ # stream (see previous test)
+ set reply [
+ r XREADGROUP GROUP mygroup consumer-1 STREAMS mystream ">"
+ ]
+ assert {[llength [lindex $reply 0 1]] == 2}
+ lindex $reply 0 1 0 1
+ } {a 1}
+
+ test {XREADGROUP can read the history of the elements we own} {
+ # Add a few more elements
+ r XADD mystream * c 3
+ r XADD mystream * d 4
+ # Read a few elements using a different consumer name
+ set reply [
+ r XREADGROUP GROUP mygroup consumer-2 STREAMS mystream ">"
+ ]
+ assert {[llength [lindex $reply 0 1]] == 2}
+ assert {[lindex $reply 0 1 0 1] eq {c 3}}
+
+ set r1 [r XREADGROUP GROUP mygroup consumer-1 COUNT 10 STREAMS mystream 0]
+ set r2 [r XREADGROUP GROUP mygroup consumer-2 COUNT 10 STREAMS mystream 0]
+ assert {[lindex $r1 0 1 0 1] eq {a 1}}
+ assert {[lindex $r2 0 1 0 1] eq {c 3}}
+ }
+
+ test {XPENDING is able to return pending items} {
+ set pending [r XPENDING mystream mygroup - + 10]
+ assert {[llength $pending] == 4}
+ for {set j 0} {$j < 4} {incr j} {
+ set item [lindex $pending $j]
+ if {$j < 2} {
+ set owner consumer-1
+ } else {
+ set owner consumer-2
+ }
+ assert {[lindex $item 1] eq $owner}
+ }
+ }
+
+ test {XPENDING can return single consumer items} {
+ set pending [r XPENDING mystream mygroup - + 10 consumer-1]
+ assert {[llength $pending] == 2}
+ }
+
+ test {XPENDING only group} {
+ set pending [r XPENDING mystream mygroup]
+ assert {[llength $pending] == 4}
+ }
+
+ test {XPENDING with IDLE} {
+ after 20
+ set pending [r XPENDING mystream mygroup IDLE 99999999 - + 10 consumer-1]
+ assert {[llength $pending] == 0}
+ set pending [r XPENDING mystream mygroup IDLE 1 - + 10 consumer-1]
+ assert {[llength $pending] == 2}
+ set pending [r XPENDING mystream mygroup IDLE 99999999 - + 10]
+ assert {[llength $pending] == 0}
+ set pending [r XPENDING mystream mygroup IDLE 1 - + 10]
+ assert {[llength $pending] == 4}
+ }
+
+ test {XPENDING with exclusive range intervals works as expected} {
+ set pending [r XPENDING mystream mygroup - + 10]
+ assert {[llength $pending] == 4}
+ set startid [lindex [lindex $pending 0] 0]
+ set endid [lindex [lindex $pending 3] 0]
+ set expending [r XPENDING mystream mygroup ($startid ($endid 10]
+ assert {[llength $expending] == 2}
+ for {set j 0} {$j < 2} {incr j} {
+ set itemid [lindex [lindex $expending $j] 0]
+ assert {$itemid ne $startid}
+ assert {$itemid ne $endid}
+ }
+ }
+
+ test {XACK is able to remove items from the consumer/group PEL} {
+ set pending [r XPENDING mystream mygroup - + 10 consumer-1]
+ set id1 [lindex $pending 0 0]
+ set id2 [lindex $pending 1 0]
+ assert {[r XACK mystream mygroup $id1] eq 1}
+ set pending [r XPENDING mystream mygroup - + 10 consumer-1]
+ assert {[llength $pending] == 1}
+ set id [lindex $pending 0 0]
+ assert {$id eq $id2}
+ set global_pel [r XPENDING mystream mygroup - + 10]
+ assert {[llength $global_pel] == 3}
+ }
+
+ test {XACK can't remove the same item multiple times} {
+ assert {[r XACK mystream mygroup $id1] eq 0}
+ }
+
+ test {XACK is able to accept multiple arguments} {
+ # One of the IDs was already removed, so it should ack
+ # just ID2.
+ assert {[r XACK mystream mygroup $id1 $id2] eq 1}
+ }
+
+    test {XACK should fail if it gets at least one invalid ID} {
+ r del mystream
+ r xgroup create s g $ MKSTREAM
+ r xadd s * f1 v1
+ set c [llength [lindex [r xreadgroup group g c streams s >] 0 1]]
+ assert {$c == 1}
+ set pending [r xpending s g - + 10 c]
+ set id1 [lindex $pending 0 0]
+ assert_error "*Invalid stream ID specified*" {r xack s g $id1 invalid-id}
+ assert {[r xack s g $id1] eq 1}
+ }
+
+ test {PEL NACK reassignment after XGROUP SETID event} {
+ r del events
+ r xadd events * f1 v1
+ r xadd events * f1 v1
+ r xadd events * f1 v1
+ r xadd events * f1 v1
+ r xgroup create events g1 $
+ r xadd events * f1 v1
+ set c [llength [lindex [r xreadgroup group g1 c1 streams events >] 0 1]]
+ assert {$c == 1}
+ r xgroup setid events g1 -
+ set c [llength [lindex [r xreadgroup group g1 c2 streams events >] 0 1]]
+ assert {$c == 5}
+ }
+
+ test {XREADGROUP will not report data on empty history. Bug #5577} {
+ r del events
+ r xadd events * a 1
+ r xadd events * b 2
+ r xadd events * c 3
+ r xgroup create events mygroup 0
+
+ # Current local PEL should be empty
+ set res [r xpending events mygroup - + 10]
+ assert {[llength $res] == 0}
+
+ # So XREADGROUP should read an empty history as well
+ set res [r xreadgroup group mygroup myconsumer count 3 streams events 0]
+ assert {[llength [lindex $res 0 1]] == 0}
+
+ # We should fetch all the elements in the stream asking for >
+ set res [r xreadgroup group mygroup myconsumer count 3 streams events >]
+ assert {[llength [lindex $res 0 1]] == 3}
+
+ # Now the history is populated with three not acked entries
+ set res [r xreadgroup group mygroup myconsumer count 3 streams events 0]
+ assert {[llength [lindex $res 0 1]] == 3}
+ }
+
+ test {XREADGROUP history reporting of deleted entries. Bug #5570} {
+ r del mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream 1 field1 A
+ r XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
+ r XADD mystream MAXLEN 1 2 field1 B
+ r XREADGROUP GROUP mygroup myconsumer STREAMS mystream >
+
+ # Now we have two pending entries, however one should be deleted
+ # and one should be ok (we should only see "B")
+ set res [r XREADGROUP GROUP mygroup myconsumer STREAMS mystream 0-1]
+ assert {[lindex $res 0 1 0] == {1-0 {}}}
+ assert {[lindex $res 0 1 1] == {2-0 {field1 B}}}
+ }
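+
+    # Entries that were deleted from the stream but are still referenced
+    # by a PEL are reported in history reads as an ID with a nil (empty)
+    # field list, as asserted above.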
+
+ test {Blocking XREADGROUP will not reply with an empty array} {
+ r del mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream 666 f v
+ set res [r XREADGROUP GROUP mygroup Alice BLOCK 10 STREAMS mystream ">"]
+ assert {[lindex $res 0 1 0] == {666-0 {f v}}}
+ r XADD mystream 667 f2 v2
+ r XDEL mystream 667
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 10 STREAMS mystream ">"
+ wait_for_blocked_clients_count 0
+ assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {mystream {}}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: key deleted} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r DEL mystream
+ assert_error "NOGROUP*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: key type changed with SET} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r SET mystream val1
+ assert_error "*WRONGTYPE*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: key type changed with transaction} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r MULTI
+ r DEL mystream
+ r SADD mystream e1
+ r EXEC
+ assert_error "*WRONGTYPE*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: flushed DB} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r FLUSHALL
+ assert_error "*NOGROUP*" {$rd read}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP: swapped DB, key doesn't exist} {
+ r SELECT 4
+ r FLUSHDB
+ r SELECT 9
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd SELECT 9
+ $rd read
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r SWAPDB 4 9
+ assert_error "*NOGROUP*" {$rd read}
+ $rd close
+ } {0} {external:skip}
+
+ test {Blocking XREADGROUP: swapped DB, key is not a stream} {
+ r SELECT 4
+ r FLUSHDB
+ r LPUSH mystream e1
+ r SELECT 9
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ set rd [redis_deferring_client]
+ $rd SELECT 9
+ $rd read
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r SWAPDB 4 9
+ assert_error "*WRONGTYPE*" {$rd read}
+ $rd close
+ } {0} {external:skip}
+
+ test {XREAD and XREADGROUP against wrong parameter} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ r XGROUP CREATE mystream mygroup $
+ assert_error "ERR Unbalanced 'xreadgroup' list of streams: for each stream key an ID or '>' must be specified." {r XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream }
+ assert_error "ERR Unbalanced 'xread' list of streams: for each stream key an ID or '$' must be specified." {r XREAD COUNT 1 STREAMS mystream }
+ }
+
+ test {Blocking XREAD: key deleted} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 0 STREAMS mystream "$"
+ wait_for_blocked_clients_count 1
+ r DEL mystream
+
+ r XADD mystream 667 f v
+ set res [$rd read]
+ assert_equal [lindex $res 0 1 0] {667-0 {f v}}
+ $rd close
+ }
+
+ test {Blocking XREAD: key type changed with SET} {
+ r DEL mystream
+ r XADD mystream 666 f v
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 0 STREAMS mystream "$"
+ wait_for_blocked_clients_count 1
+ r SET mystream val1
+
+ r DEL mystream
+ r XADD mystream 667 f v
+ set res [$rd read]
+ assert_equal [lindex $res 0 1 0] {667-0 {f v}}
+ $rd close
+ }
+
+ test {Blocking XREADGROUP for stream that ran dry (issue #5299)} {
+ set rd [redis_deferring_client]
+
+        # Add an entry, then delete it; now the stream's last_id is 666.
+ r DEL mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream 666 key value
+ r XDEL mystream 666
+
+        # Pass the special `>` ID but with no new entry; released on timeout.
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 10 STREAMS mystream >
+ assert_equal [$rd read] {}
+
+        # XADD throws an error if the ID is equal to or smaller than last_id.
+ assert_error ERR*equal*smaller* {r XADD mystream 665 key value}
+ assert_error ERR*equal*smaller* {r XADD mystream 666 key value}
+
+        # Enter the blocking state, then get released by the new entry.
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream >
+ wait_for_blocked_clients_count 1
+ r XADD mystream 667 key value
+ assert_equal [$rd read] {{mystream {{667-0 {key value}}}}}
+
+ $rd close
+ }
+
+ test "Blocking XREADGROUP will ignore BLOCK if ID is not >" {
+ set rd [redis_deferring_client]
+
+        # Add an entry, then delete it; now the stream's last_id is 666.
+ r DEL mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream 666 key value
+ r XDEL mystream 666
+
+        # Return right away instead of blocking: if the specified ID is
+        # not the special `>` ID, the stream is returned with an empty
+        # list instead of NIL.
+ foreach id {0 600 666 700} {
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream $id
+ assert_equal [$rd read] {{mystream {}}}
+ }
+
+        # After adding a new entry, `XREADGROUP BLOCK` still returns the
+        # stream with an empty list because the pending list is empty.
+ r XADD mystream 667 key value
+ foreach id {0 600 666 667 700} {
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream $id
+ assert_equal [$rd read] {{mystream {}}}
+ }
+
+        # After we read it once the pending list is no longer empty, so
+        # passing any ID smaller than 667 returns the pending entry.
+ set res [r XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream >]
+ assert_equal $res {{mystream {{667-0 {key value}}}}}
+ foreach id {0 600 666} {
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream $id
+ assert_equal [$rd read] {{mystream {{667-0 {key value}}}}}
+ }
+
+        # Passing an ID equal to or greater than 667 returns the stream
+        # with an empty list.
+ foreach id {667 700} {
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream $id
+ assert_equal [$rd read] {{mystream {}}}
+ }
+
+        # After we ACK the pending entry, the stream is again returned
+        # with an empty list.
+ r XACK mystream mygroup 667
+ foreach id {0 600 666 667 700} {
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream $id
+ assert_equal [$rd read] {{mystream {}}}
+ }
+
+ $rd close
+ }
+
+ test {Blocking XREADGROUP for stream key that has clients blocked on list} {
+ set rd [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ # First delete the stream
+ r DEL mystream
+
+        # now block a client on the non-existing key with a list command
+ $rd2 BLPOP mystream 0
+
+ # wait until we verify the client is blocked
+ wait_for_blocked_clients_count 1
+
+ # verify we only have 1 regular blocking key
+ assert_equal 1 [getInfoProperty [r info clients] total_blocking_keys]
+ assert_equal 0 [getInfoProperty [r info clients] total_blocking_keys_on_nokey]
+
+ # now write mystream as stream
+ r XADD mystream 666 key value
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+
+ # block another client on xreadgroup
+ $rd XREADGROUP GROUP mygroup myconsumer BLOCK 0 STREAMS mystream ">"
+
+ # wait until we verify we have 2 blocked clients (one for the list and one for the stream)
+ wait_for_blocked_clients_count 2
+
+        # verify we have 1 blocking key which also has clients blocked
+        # on the nokey condition
+ assert_equal 1 [getInfoProperty [r info clients] total_blocking_keys]
+ assert_equal 1 [getInfoProperty [r info clients] total_blocking_keys_on_nokey]
+
+ # now delete the key and verify we have no clients blocked on nokey condition
+ r DEL mystream
+ assert_error "NOGROUP*" {$rd read}
+ assert_equal 1 [getInfoProperty [r info clients] total_blocking_keys]
+ assert_equal 0 [getInfoProperty [r info clients] total_blocking_keys_on_nokey]
+
+        # close the only remaining client and make sure we have no more blocking keys
+ $rd2 close
+
+ # wait until we verify we have no more blocked clients
+ wait_for_blocked_clients_count 0
+
+ assert_equal 0 [getInfoProperty [r info clients] total_blocking_keys]
+ assert_equal 0 [getInfoProperty [r info clients] total_blocking_keys_on_nokey]
+
+ $rd close
+ }
+
+ test {Blocking XREADGROUP for stream key that has clients blocked on list - avoid endless loop} {
+ r DEL mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set rd3 [redis_deferring_client]
+
+ $rd1 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream >
+ $rd2 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream >
+ $rd3 xreadgroup GROUP mygroup myuser COUNT 10 BLOCK 10000 STREAMS mystream >
+
+ wait_for_blocked_clients_count 3
+
+ r xadd mystream MAXLEN 5000 * field1 value1 field2 value2 field3 value3
+
+ $rd1 close
+ $rd2 close
+ $rd3 close
+
+ assert_equal [r ping] {PONG}
+ }
+
+ test {XGROUP DESTROY should unblock XREADGROUP with -NOGROUP} {
+ r config resetstat
+ r del mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r XGROUP DESTROY mystream mygroup
+ assert_error "NOGROUP*" {$rd read}
+ $rd close
+
+        # verify that command stats, error stats and the error counter
+        # work on a failed blocked command
+ assert_match {*count=1*} [errorrstat NOGROUP r]
+ assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdrstat xreadgroup r]
+ assert_equal [s total_error_replies] 1
+ }
+
+ test {RENAME can unblock XREADGROUP with data} {
+ r del mystream{t}
+ r XGROUP CREATE mystream{t} mygroup $ MKSTREAM
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream{t} ">"
+ wait_for_blocked_clients_count 1
+ r XGROUP CREATE mystream2{t} mygroup $ MKSTREAM
+ r XADD mystream2{t} 100 f1 v1
+ r RENAME mystream2{t} mystream{t}
+ assert_equal "{mystream{t} {{100-0 {f1 v1}}}}" [$rd read] ;# mystream2{t} had mygroup before RENAME
+ $rd close
+ }
+
+ test {RENAME can unblock XREADGROUP with -NOGROUP} {
+ r del mystream{t}
+ r XGROUP CREATE mystream{t} mygroup $ MKSTREAM
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream{t} ">"
+ wait_for_blocked_clients_count 1
+ r XADD mystream2{t} 100 f1 v1
+ r RENAME mystream2{t} mystream{t}
+ assert_error "*NOGROUP*" {$rd read} ;# mystream2{t} didn't have mygroup before RENAME
+ $rd close
+ }
+
+ test {XCLAIM can claim PEL items from another consumer} {
+ # Add 3 items into the stream, and create a consumer group
+ r del mystream
+ set id1 [r XADD mystream * a 1]
+ set id2 [r XADD mystream * b 2]
+ set id3 [r XADD mystream * c 3]
+ r XGROUP CREATE mystream mygroup 0
+
+ # Consumer 1 reads item 1 from the stream without acknowledgements.
+ # Consumer 2 then claims pending item 1 from the PEL of consumer 1
+ set reply [
+ r XREADGROUP GROUP mygroup consumer1 count 1 STREAMS mystream >
+ ]
+ assert {[llength [lindex $reply 0 1 0 1]] == 2}
+ assert {[lindex $reply 0 1 0 1] eq {a 1}}
+
+        # make sure the entry is present in the group PEL and in the
+        # right consumer's PEL
+ assert {[llength [r XPENDING mystream mygroup - + 10]] == 1}
+ assert {[llength [r XPENDING mystream mygroup - + 10 consumer1]] == 1}
+ assert {[llength [r XPENDING mystream mygroup - + 10 consumer2]] == 0}
+
+ after 200
+ set reply [
+ r XCLAIM mystream mygroup consumer2 10 $id1
+ ]
+ assert {[llength [lindex $reply 0 1]] == 2}
+ assert {[lindex $reply 0 1] eq {a 1}}
+
+        # make sure the entry is present in the group PEL and in the
+        # right consumer's PEL
+ assert {[llength [r XPENDING mystream mygroup - + 10]] == 1}
+ assert {[llength [r XPENDING mystream mygroup - + 10 consumer1]] == 0}
+ assert {[llength [r XPENDING mystream mygroup - + 10 consumer2]] == 1}
+
+ # Consumer 1 reads another 2 items from stream
+ r XREADGROUP GROUP mygroup consumer1 count 2 STREAMS mystream >
+ after 200
+
+        # Delete item 2 from the stream. Now consumer 1's PEL contains
+        # only item 3. Try to use consumer 2 to claim the deleted item 2
+        # from the PEL of consumer 1; this should be a no-op.
+ r XDEL mystream $id2
+ set reply [
+ r XCLAIM mystream mygroup consumer2 10 $id2
+ ]
+ assert {[llength $reply] == 0}
+
+        # Delete item 3 from the stream. Now consumer 1's PEL is empty.
+        # Try to use consumer 2 to claim the deleted item 3 from the PEL
+        # of consumer 1; this should be a no-op.
+ after 200
+ r XDEL mystream $id3
+ set reply [
+ r XCLAIM mystream mygroup consumer2 10 $id3
+ ]
+ assert {[llength $reply] == 0}
+ }
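+
+    # Note: claiming an entry that no longer exists in the stream yields
+    # an empty reply; since Redis 7.0 such dangling PEL references are
+    # also removed (see the "XCLAIM with XDEL" test below).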
+
+ test {XCLAIM without JUSTID increments delivery count} {
+ # Add 3 items into the stream, and create a consumer group
+ r del mystream
+ set id1 [r XADD mystream * a 1]
+ set id2 [r XADD mystream * b 2]
+ set id3 [r XADD mystream * c 3]
+ r XGROUP CREATE mystream mygroup 0
+
+ # Consumer 1 reads item 1 from the stream without acknowledgements.
+ # Consumer 2 then claims pending item 1 from the PEL of consumer 1
+ set reply [
+ r XREADGROUP GROUP mygroup consumer1 count 1 STREAMS mystream >
+ ]
+ assert {[llength [lindex $reply 0 1 0 1]] == 2}
+ assert {[lindex $reply 0 1 0 1] eq {a 1}}
+ after 200
+ set reply [
+ r XCLAIM mystream mygroup consumer2 10 $id1
+ ]
+ assert {[llength [lindex $reply 0 1]] == 2}
+ assert {[lindex $reply 0 1] eq {a 1}}
+
+ set reply [
+ r XPENDING mystream mygroup - + 10
+ ]
+ assert {[llength [lindex $reply 0]] == 4}
+ assert {[lindex $reply 0 3] == 2}
+
+ # Consumer 3 then claims pending item 1 from the PEL of consumer 2 using JUSTID
+ after 200
+ set reply [
+ r XCLAIM mystream mygroup consumer3 10 $id1 JUSTID
+ ]
+ assert {[llength $reply] == 1}
+ assert {[lindex $reply 0] eq $id1}
+
+ set reply [
+ r XPENDING mystream mygroup - + 10
+ ]
+ assert {[llength [lindex $reply 0]] == 4}
+ assert {[lindex $reply 0 3] == 2}
+ }
+
+ test {XCLAIM same consumer} {
+ # Add 3 items into the stream, and create a consumer group
+ r del mystream
+ set id1 [r XADD mystream * a 1]
+ set id2 [r XADD mystream * b 2]
+ set id3 [r XADD mystream * c 3]
+ r XGROUP CREATE mystream mygroup 0
+
+ set reply [r XREADGROUP GROUP mygroup consumer1 count 1 STREAMS mystream >]
+ assert {[llength [lindex $reply 0 1 0 1]] == 2}
+ assert {[lindex $reply 0 1 0 1] eq {a 1}}
+ after 200
+ # re-claim with the same consumer that already has it
+ assert {[llength [r XCLAIM mystream mygroup consumer1 10 $id1]] == 1}
+
+ # make sure the entry is still in the PEL
+ set reply [r XPENDING mystream mygroup - + 10]
+ assert {[llength $reply] == 1}
+ assert {[lindex $reply 0 1] eq {consumer1}}
+ }
+
+ test {XAUTOCLAIM can claim PEL items from another consumer} {
+ # Add 3 items into the stream, and create a consumer group
+ r del mystream
+ set id1 [r XADD mystream * a 1]
+ set id2 [r XADD mystream * b 2]
+ set id3 [r XADD mystream * c 3]
+ set id4 [r XADD mystream * d 4]
+ r XGROUP CREATE mystream mygroup 0
+
+ # Consumer 1 reads item 1 from the stream without acknowledgements.
+ # Consumer 2 then claims pending item 1 from the PEL of consumer 1
+ set reply [r XREADGROUP GROUP mygroup consumer1 count 1 STREAMS mystream >]
+ assert_equal [llength [lindex $reply 0 1 0 1]] 2
+ assert_equal [lindex $reply 0 1 0 1] {a 1}
+ after 200
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 1]
+ assert_equal [llength $reply] 3
+ assert_equal [lindex $reply 0] "0-0"
+ assert_equal [llength [lindex $reply 1]] 1
+ assert_equal [llength [lindex $reply 1 0]] 2
+ assert_equal [llength [lindex $reply 1 0 1]] 2
+ assert_equal [lindex $reply 1 0 1] {a 1}
+
+ # Consumer 1 reads another 2 items from stream
+ r XREADGROUP GROUP mygroup consumer1 count 3 STREAMS mystream >
+
+ # For min-idle-time
+ after 200
+
+        # Delete item 2 from the stream. Consumer 1's PEL now references
+        # a deleted entry; using consumer 2 to claim it returns no data.
+ r XDEL mystream $id2
+
+ # id1 and id3 are self-claimed here but not id2 ('count' was set to 3)
+ # we make sure id2 is indeed skipped (the cursor points to id4)
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 3]
+
+ assert_equal [llength $reply] 3
+ assert_equal [lindex $reply 0] $id4
+ assert_equal [llength [lindex $reply 1]] 2
+ assert_equal [llength [lindex $reply 1 0]] 2
+ assert_equal [llength [lindex $reply 1 0 1]] 2
+ assert_equal [lindex $reply 1 0 1] {a 1}
+ assert_equal [lindex $reply 1 1 1] {c 3}
+ assert_equal [llength [lindex $reply 2]] 1
+ assert_equal [llength [lindex $reply 2 0]] 1
+
+        # Delete item 4 from the stream. When consumer 2 tries to claim
+        # it below, the dangling PEL entry is dropped and the ID is only
+        # reported in the deleted-IDs part of the reply.
+ after 200
+
+ r XDEL mystream $id4
+
+    # id1 and id3 are self-claimed here but not id2 and id4
+    # (COUNT defaults to 100)
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - JUSTID]
+
+    # We also test the JUSTID modifier here. Note that even with
+    # JUSTID, deleted entries are reported in the deleted-IDs part of
+    # the reply (consistent with XCLAIM).
+
+ assert_equal [llength $reply] 3
+ assert_equal [lindex $reply 0] {0-0}
+ assert_equal [llength [lindex $reply 1]] 2
+ assert_equal [lindex $reply 1 0] $id1
+ assert_equal [lindex $reply 1 1] $id3
+ }
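+
+    # Note: since Redis 7.0 the XAUTOCLAIM reply is a 3-element list of
+    # {next-cursor claimed-entries deleted-ids}, which is what the
+    # llength/lindex assertions above are checking.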
+
+ test {XAUTOCLAIM as an iterator} {
+ # Add 5 items into the stream, and create a consumer group
+ r del mystream
+ set id1 [r XADD mystream * a 1]
+ set id2 [r XADD mystream * b 2]
+ set id3 [r XADD mystream * c 3]
+ set id4 [r XADD mystream * d 4]
+ set id5 [r XADD mystream * e 5]
+ r XGROUP CREATE mystream mygroup 0
+
+ # Read 5 messages into consumer1
+ r XREADGROUP GROUP mygroup consumer1 count 90 STREAMS mystream >
+
+ # For min-idle-time
+ after 200
+
+ # Claim 2 entries
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 2]
+ assert_equal [llength $reply] 3
+ set cursor [lindex $reply 0]
+ assert_equal $cursor $id3
+ assert_equal [llength [lindex $reply 1]] 2
+ assert_equal [llength [lindex $reply 1 0 1]] 2
+ assert_equal [lindex $reply 1 0 1] {a 1}
+
+ # Claim 2 more entries
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 2]
+ assert_equal [llength $reply] 3
+ set cursor [lindex $reply 0]
+ assert_equal $cursor $id5
+ assert_equal [llength [lindex $reply 1]] 2
+ assert_equal [llength [lindex $reply 1 0 1]] 2
+ assert_equal [lindex $reply 1 0 1] {c 3}
+
+ # Claim last entry
+ set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 1]
+ assert_equal [llength $reply] 3
+ set cursor [lindex $reply 0]
+ assert_equal $cursor {0-0}
+ assert_equal [llength [lindex $reply 1]] 1
+ assert_equal [llength [lindex $reply 1 0 1]] 2
+ assert_equal [lindex $reply 1 0 1] {e 5}
+ }
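+
+    # A typical application-side iteration pattern (sketch only, using
+    # the same key/group/consumer names as the test above): keep calling
+    # XAUTOCLAIM with the cursor from the previous reply until it wraps
+    # around to 0-0:
+    #
+    #    set cursor 0-0
+    #    while 1 {
+    #        set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 2]
+    #        set cursor [lindex $reply 0]
+    #        # ... process the claimed entries in [lindex $reply 1] ...
+    #        if {$cursor eq "0-0"} break
+    #    }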
+
+ test {XAUTOCLAIM COUNT must be > 0} {
+ assert_error "ERR COUNT must be > 0" {r XAUTOCLAIM key group consumer 1 1 COUNT 0}
+ }
+
+ test {XCLAIM with XDEL} {
+ r DEL x
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XDEL x 2-0
+ assert_equal [r XCLAIM x grp Bob 0 1-0 2-0 3-0] {{1-0 {f v}} {3-0 {f v}}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XCLAIM with trimming} {
+ r DEL x
+ r config set stream-node-max-entries 2
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XTRIM x MAXLEN 1
+ assert_equal [r XCLAIM x grp Bob 0 1-0 2-0 3-0] {{3-0 {f v}}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XAUTOCLAIM with XDEL} {
+ r DEL x
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XDEL x 2-0
+ assert_equal [r XAUTOCLAIM x grp Bob 0 0-0] {0-0 {{1-0 {f v}} {3-0 {f v}}} 2-0}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XAUTOCLAIM with XDEL and count} {
+ r DEL x
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XDEL x 1-0
+ r XDEL x 2-0
+ assert_equal [r XAUTOCLAIM x grp Bob 0 0-0 COUNT 1] {2-0 {} 1-0}
+ assert_equal [r XAUTOCLAIM x grp Bob 0 2-0 COUNT 1] {3-0 {} 2-0}
+ assert_equal [r XAUTOCLAIM x grp Bob 0 3-0 COUNT 1] {0-0 {{3-0 {f v}}} {}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XAUTOCLAIM with out of range count} {
+ assert_error {ERR COUNT*} {r XAUTOCLAIM x grp Bob 0 3-0 COUNT 8070450532247928833}
+ }
+
+    test {XAUTOCLAIM with trimming} {
+ r DEL x
+ r config set stream-node-max-entries 2
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XTRIM x MAXLEN 1
+ assert_equal [r XAUTOCLAIM x grp Bob 0 0-0] {0-0 {{3-0 {f v}}} {1-0 2-0}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XINFO FULL output} {
+ r del x
+ r XADD x 100 a 1
+ r XADD x 101 b 1
+ r XADD x 102 c 1
+ r XADD x 103 e 1
+ r XADD x 104 f 1
+ r XGROUP CREATE x g1 0
+ r XGROUP CREATE x g2 0
+ r XREADGROUP GROUP g1 Alice COUNT 1 STREAMS x >
+ r XREADGROUP GROUP g1 Bob COUNT 1 STREAMS x >
+ r XREADGROUP GROUP g1 Bob NOACK COUNT 1 STREAMS x >
+ r XREADGROUP GROUP g2 Charlie COUNT 4 STREAMS x >
+ r XDEL x 103
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [llength $reply] 18
+ assert_equal [dict get $reply length] 4
+ assert_equal [dict get $reply entries] "{100-0 {a 1}} {101-0 {b 1}} {102-0 {c 1}} {104-0 {f 1}}"
+
+ # First consumer group
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group name] "g1"
+ assert_equal [lindex [dict get $group pending] 0 0] "100-0"
+ set consumer [lindex [dict get $group consumers] 0]
+ assert_equal [dict get $consumer name] "Alice"
+ assert_equal [lindex [dict get $consumer pending] 0 0] "100-0" ;# first entry in first consumer's PEL
+
+ # Second consumer group
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group name] "g2"
+ set consumer [lindex [dict get $group consumers] 0]
+ assert_equal [dict get $consumer name] "Charlie"
+ assert_equal [lindex [dict get $consumer pending] 0 0] "100-0" ;# first entry in first consumer's PEL
+ assert_equal [lindex [dict get $consumer pending] 1 0] "101-0" ;# second entry in first consumer's PEL
+
+ set reply [r XINFO STREAM x FULL COUNT 1]
+ assert_equal [llength $reply] 18
+ assert_equal [dict get $reply length] 4
+ assert_equal [dict get $reply entries] "{100-0 {a 1}}"
+ }
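+
+    # Note: the XINFO STREAM ... FULL reply is a flat list of alternating
+    # field names and values (and so are the nested "groups" and
+    # "consumers" elements), which is why it can be consumed with Tcl's
+    # [dict get] as above.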
+
+ test {Consumer seen-time and active-time} {
+ r DEL mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream >
+ after 100
+ set reply [r xinfo consumers mystream mygroup]
+ set consumer_info [lindex $reply 0]
+ assert {[dict get $consumer_info idle] >= 100} ;# consumer idle (seen-time)
+ assert_equal [dict get $consumer_info inactive] "-1" ;# consumer inactive (active-time)
+
+ r XADD mystream * f v
+ r XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream >
+ set reply [r xinfo consumers mystream mygroup]
+ set consumer_info [lindex $reply 0]
+ assert_equal [lindex $consumer_info 1] "Alice" ;# consumer name
+ assert {[dict get $consumer_info idle] < 80} ;# consumer idle (seen-time)
+ assert {[dict get $consumer_info inactive] < 80} ;# consumer inactive (active-time)
+
+ after 100
+ r XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream >
+ set reply [r xinfo consumers mystream mygroup]
+ set consumer_info [lindex $reply 0]
+ assert {[dict get $consumer_info idle] < 80} ;# consumer idle (seen-time)
+ assert {[dict get $consumer_info inactive] >= 100} ;# consumer inactive (active-time)
+
+        # Simulate loading from RDB
+        set reply [r XINFO STREAM mystream FULL]
+ set group [lindex [dict get $reply groups] 0]
+ set consumer [lindex [dict get $group consumers] 0]
+ set prev_seen [dict get $consumer seen-time]
+ set prev_active [dict get $consumer active-time]
+
+ set dump [r DUMP mystream]
+ r DEL mystream
+ r RESTORE mystream 0 $dump
+
+        set reply [r XINFO STREAM mystream FULL]
+ set group [lindex [dict get $reply groups] 0]
+ set consumer [lindex [dict get $group consumers] 0]
+ assert_equal $prev_seen [dict get $consumer seen-time]
+ assert_equal $prev_active [dict get $consumer active-time]
+ }
+
+    test {XGROUP CREATECONSUMER: create consumer if it does not exist} {
+ r del mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream * f v
+
+ set reply [r xinfo groups mystream]
+ set group_info [lindex $reply 0]
+ set n_consumers [lindex $group_info 3]
+        assert_equal $n_consumers 0 ;# number of consumers in the group
+
+ # create consumer using XREADGROUP
+ r XREADGROUP GROUP mygroup Alice COUNT 1 STREAMS mystream >
+
+ set reply [r xinfo groups mystream]
+ set group_info [lindex $reply 0]
+ set n_consumers [lindex $group_info 3]
+        assert_equal $n_consumers 1 ;# number of consumers in the group
+
+ set reply [r xinfo consumers mystream mygroup]
+ set consumer_info [lindex $reply 0]
+ assert_equal [lindex $consumer_info 1] "Alice" ;# consumer name
+
+        # create a consumer using XGROUP CREATECONSUMER when Alice already exists
+ set created [r XGROUP CREATECONSUMER mystream mygroup Alice]
+ assert_equal $created 0
+
+        # create a consumer using XGROUP CREATECONSUMER when Bob does not exist
+ set created [r XGROUP CREATECONSUMER mystream mygroup Bob]
+ assert_equal $created 1
+
+ set reply [r xinfo groups mystream]
+ set group_info [lindex $reply 0]
+ set n_consumers [lindex $group_info 3]
+        assert_equal $n_consumers 2 ;# number of consumers in the group
+
+ set reply [r xinfo consumers mystream mygroup]
+ set consumer_info [lindex $reply 0]
+ assert_equal [lindex $consumer_info 1] "Alice" ;# consumer name
+ set consumer_info [lindex $reply 1]
+ assert_equal [lindex $consumer_info 1] "Bob" ;# consumer name
+ }
+
+ test {XGROUP CREATECONSUMER: group must exist} {
+ r del mystream
+ r XADD mystream * f v
+ assert_error "*NOGROUP*" {r XGROUP CREATECONSUMER mystream mygroup consumer}
+ }
+
+ start_server {tags {"stream needs:debug"} overrides {appendonly yes aof-use-rdb-preamble no appendfsync always}} {
+ test {XREADGROUP with NOACK creates consumer} {
+ r del mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream * f1 v1
+ r XREADGROUP GROUP mygroup Alice NOACK STREAMS mystream ">"
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Bob BLOCK 0 NOACK STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r XADD mystream * f2 v2
+ set grpinfo [r xinfo groups mystream]
+
+ r debug loadaof
+ assert_equal [r xinfo groups mystream] $grpinfo
+ set reply [r xinfo consumers mystream mygroup]
+ set consumer_info [lindex $reply 0]
+ assert_equal [lindex $consumer_info 1] "Alice" ;# consumer name
+ set consumer_info [lindex $reply 1]
+ assert_equal [lindex $consumer_info 1] "Bob" ;# consumer name
+ $rd close
+ }
+
+ test {Consumer without PEL is present in AOF after AOFRW} {
+ r del mystream
+ r XGROUP CREATE mystream mygroup $ MKSTREAM
+ r XADD mystream * f v
+ r XREADGROUP GROUP mygroup Alice NOACK STREAMS mystream ">"
+ set rd [redis_deferring_client]
+ $rd XREADGROUP GROUP mygroup Bob BLOCK 0 NOACK STREAMS mystream ">"
+ wait_for_blocked_clients_count 1
+ r XGROUP CREATECONSUMER mystream mygroup Charlie
+ set grpinfo [lindex [r xinfo groups mystream] 0]
+
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+
+ set curr_grpinfo [lindex [r xinfo groups mystream] 0]
+ assert {$curr_grpinfo == $grpinfo}
+ set n_consumers [lindex $grpinfo 3]
+
+            # All consumers are created via XREADGROUP, regardless of
+            # whether they managed to read any entries or not
+ assert_equal $n_consumers 3
+ $rd close
+ }
+ }
+
+ test {Consumer group read counter and lag in empty streams} {
+ r DEL x
+ r XGROUP CREATE x g1 0 MKSTREAM
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $reply max-deleted-entry-id] "0-0"
+ assert_equal [dict get $reply entries-added] 0
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 0
+
+ r XADD x 1-0 data a
+ r XDEL x 1-0
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $reply max-deleted-entry-id] "1-0"
+ assert_equal [dict get $reply entries-added] 1
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 0
+ }
+
+ test {Consumer group read counter and lag sanity} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data b
+ r XADD x 3-0 data c
+ r XADD x 4-0 data d
+ r XADD x 5-0 data e
+ r XGROUP CREATE x g1 0
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 5
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 1
+ assert_equal [dict get $group lag] 4
+
+ r XREADGROUP GROUP g1 c12 COUNT 10 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 0
+
+ r XADD x 6-0 data f
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+ }
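+
+    # In the absence of deletions, as above, a group's lag is simply
+    # entries-added minus entries-read.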
+
+ test {Consumer group lag with XDELs} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data b
+ r XADD x 3-0 data c
+ r XADD x 4-0 data d
+ r XADD x 5-0 data e
+ r XDEL x 3-0
+ r XGROUP CREATE x g1 0
+ r XGROUP CREATE x g2 0
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 0
+
+ r XADD x 6-0 data f
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+
+ r XTRIM x MINID = 3-0
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 3
+
+ r XTRIM x MINID = 5-0
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 2
+ }
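+
+    # Takeaway: deletions in the middle of the stream can make a group's
+    # read counter, and therefore its lag, unknown ({}); an exact lag is
+    # reported again once it can be computed cheaply (e.g. after the
+    # group consumed up to the end of the stream, or for a group that
+    # consumed nothing, whose lag is just the stream length).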
+
+ test {Loading from legacy (Redis <= v6.2.x, rdb_ver < 10) persistence} {
+ # The payload was DUMPed from a v5 instance after:
+ # XADD x 1-0 data a
+ # XADD x 2-0 data b
+ # XADD x 3-0 data c
+ # XADD x 4-0 data d
+ # XADD x 5-0 data e
+ # XADD x 6-0 data f
+ # XDEL x 3-0
+ # XGROUP CREATE x g1 0
+ # XGROUP CREATE x g2 0
+ # XREADGROUP GROUP g1 c11 COUNT 4 STREAMS x >
+ # XTRIM x MAXLEN = 2
+
+ r DEL x
+ r RESTORE x 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4A\x40\x57\x16\x57\x00\x00\x00\x23\x00\x02\x01\x04\x01\x01\x01\x84\x64\x61\x74\x61\x05\x00\x01\x03\x01\x00\x20\x01\x03\x81\x61\x02\x04\x20\x0A\x00\x01\x40\x0A\x00\x62\x60\x0A\x00\x02\x40\x0A\x00\x63\x60\x0A\x40\x22\x01\x81\x64\x20\x0A\x40\x39\x20\x0A\x00\x65\x60\x0A\x00\x05\x40\x0A\x00\x66\x20\x0A\x00\xFF\x02\x06\x00\x02\x02\x67\x31\x05\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x01\x03\x63\x31\x31\x3E\xF7\x83\x43\x7A\x01\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x02\x67\x32\x00\x00\x00\x00\x09\x00\x3D\x52\xEF\x68\x67\x52\x1D\xFA"
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "0-0"
+ assert_equal [dict get $reply entries-added] 2
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 1
+ assert_equal [dict get $group lag] 1
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group entries-read] 0
+ assert_equal [dict get $group lag] 2
+ }
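+
+    # When loading a pre-7.0 payload these counters do not exist in the
+    # persisted data, so Redis derives best-effort values for
+    # entries-added, max-deleted-entry-id and the groups' entries-read,
+    # which is what the assertions above verify.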
+
+ test {Loading from legacy (Redis <= v7.0.x, rdb_ver < 11) persistence} {
+ # The payload was DUMPed from a v7 instance after:
+ # XGROUP CREATE x g $ MKSTREAM
+ # XADD x 1-1 f v
+ # XREADGROUP GROUP g Alice STREAMS x >
+
+ r DEL x
+ r RESTORE x 0 "\x13\x01\x10\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x1D\x1D\x00\x00\x00\x0A\x00\x01\x01\x00\x01\x01\x01\x81\x66\x02\x00\x01\x02\x01\x00\x01\x00\x01\x81\x76\x02\x04\x01\xFF\x01\x01\x01\x01\x01\x00\x00\x01\x01\x01\x67\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\xF5\x5A\x71\xC7\x84\x01\x00\x00\x01\x01\x05\x41\x6C\x69\x63\x65\xF5\x5A\x71\xC7\x84\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x0B\x00\xA7\xA9\x14\xA5\x27\xFF\x9B\x9B"
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ set consumer [lindex [dict get $group consumers] 0]
+ assert_equal [dict get $consumer seen-time] [dict get $consumer active-time]
+ }
+
+ start_server {tags {"external:skip"}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set slave [srv 0 client]
+
+ foreach noack {0 1} {
+ test "Consumer group last ID propagation to slave (NOACK=$noack)" {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ $master del stream
+ $master xadd stream * a 1
+ $master xadd stream * a 2
+ $master xadd stream * a 3
+ $master xgroup create stream mygroup 0
+
+ # Consume the first two items on the master
+ for {set j 0} {$j < 2} {incr j} {
+ if {$noack} {
+ set item [$master xreadgroup group mygroup \
+ myconsumer COUNT 1 NOACK STREAMS stream >]
+ } else {
+ set item [$master xreadgroup group mygroup \
+ myconsumer COUNT 1 STREAMS stream >]
+ }
+ set id [lindex $item 0 1 0 0]
+ if {$noack == 0} {
+ assert {[$master xack stream mygroup $id] eq "1"}
+ }
+ }
+
+ wait_for_ofs_sync $master $slave
+
+ # Turn slave into master
+ $slave slaveof no one
+
+ set item [$slave xreadgroup group mygroup myconsumer \
+ COUNT 1 STREAMS stream >]
+
+ # The consumed entry should be the third
+ set myentry [lindex $item 0 1 0 1]
+ assert {$myentry eq {a 3}}
+ }
+ }
+ }
+
+ start_server {tags {"external:skip"}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set replica [srv 0 client]
+
+ foreach autoclaim {0 1} {
+ test "Replication tests of XCLAIM with deleted entries (autclaim=$autoclaim)" {
+ $replica replicaof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ $master DEL x
+ $master XADD x 1-0 f v
+ $master XADD x 2-0 f v
+ $master XADD x 3-0 f v
+ $master XADD x 4-0 f v
+ $master XADD x 5-0 f v
+ $master XGROUP CREATE x grp 0
+ assert_equal [$master XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}} {4-0 {f v}} {5-0 {f v}}}}}
+ wait_for_ofs_sync $master $replica
+ assert_equal [llength [$replica XPENDING x grp - + 10 Alice]] 5
+ $master XDEL x 2-0
+ $master XDEL x 4-0
+ if {$autoclaim} {
+ assert_equal [$master XAUTOCLAIM x grp Bob 0 0-0] {0-0 {{1-0 {f v}} {3-0 {f v}} {5-0 {f v}}} {2-0 4-0}}
+ wait_for_ofs_sync $master $replica
+ assert_equal [llength [$replica XPENDING x grp - + 10 Alice]] 0
+ } else {
+ assert_equal [$master XCLAIM x grp Bob 0 1-0 2-0 3-0 4-0] {{1-0 {f v}} {3-0 {f v}}}
+ wait_for_ofs_sync $master $replica
+ assert_equal [llength [$replica XPENDING x grp - + 10 Alice]] 1
+ }
+ }
+ }
+ }
+
+ start_server {tags {"stream needs:debug"} overrides {appendonly yes aof-use-rdb-preamble no}} {
+        test {Empty stream with no lastid can be rewritten into AOF correctly} {
+ r XGROUP CREATE mystream group-name $ MKSTREAM
+ assert {[dict get [r xinfo stream mystream] length] == 0}
+ set grpinfo [r xinfo groups mystream]
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ assert {[dict get [r xinfo stream mystream] length] == 0}
+ assert_equal [r xinfo groups mystream] $grpinfo
+ }
+ }
+}
diff --git a/tests/unit/type/stream.tcl b/tests/unit/type/stream.tcl
new file mode 100644
index 0000000..3081c40
--- /dev/null
+++ b/tests/unit/type/stream.tcl
@@ -0,0 +1,940 @@
+# Return value semantics are like strcmp() and similar: 0 if the IDs are
+# equal, 1 if a > b, -1 if a < b.
+proc streamCompareID {a b} {
+ if {$a eq $b} {return 0}
+ lassign [split $a -] a_ms a_seq
+ lassign [split $b -] b_ms b_seq
+ if {$a_ms > $b_ms} {return 1}
+ if {$a_ms < $b_ms} {return -1}
+    # Same ms case, compare seq.
+    if {$a_seq > $b_seq} {return 1}
+    if {$a_seq < $b_seq} {return -1}
+    return 0 ;# Numerically equal IDs.
+}
+
+# Return the ID immediately greater than the specified one. Note that
+# this function makes no attempt to handle 'seq' overflow, since it is
+# a 64 bit value.
+proc streamNextID {id} {
+ lassign [split $id -] ms seq
+ incr seq
+ join [list $ms $seq] -
+}
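+
+# For example, [streamNextID 5-10] returns 5-11.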
+
+# Generate a random stream entry ID with the ms part between min and max
+# and a low sequence number (0-999 range), in order to stress-test
+# XRANGE against a Tcl-only reference implementation that keeps the same
+# entries in a linear array.
+proc streamRandomID {min_id max_id} {
+ lassign [split $min_id -] min_ms min_seq
+ lassign [split $max_id -] max_ms max_seq
+ set delta [expr {$max_ms-$min_ms+1}]
+ set ms [expr {$min_ms+[randomInt $delta]}]
+ set seq [randomInt 1000]
+ return $ms-$seq
+}
+
+# Tcl-side implementation of XRANGE, used to fuzz-test the Redis XRANGE
+# implementation.
+proc streamSimulateXRANGE {items start end} {
+ set res {}
+ foreach i $items {
+ set this_id [lindex $i 0]
+ if {[streamCompareID $this_id $start] >= 0} {
+ if {[streamCompareID $this_id $end] <= 0} {
+ lappend res $i
+ }
+ }
+ }
+ return $res
+}
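+
+# Example: with items {{1-1 {a 1}} {2-1 {b 2}}}, [streamSimulateXRANGE
+# $items 1-0 1-9] returns {{1-1 {a 1}}}. The XRANGE fuzzing test below
+# cross-checks the server's replies against this model.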
+
+set content {} ;# Will be populated with Tcl side copy of the stream content.
+
+start_server {
+ tags {"stream"}
+} {
+ test "XADD wrong number of args" {
+ assert_error {*wrong number of arguments for 'xadd' command} {r XADD mystream}
+ assert_error {*wrong number of arguments for 'xadd' command} {r XADD mystream *}
+ assert_error {*wrong number of arguments for 'xadd' command} {r XADD mystream * field}
+ }
+
+ test {XADD can add entries into a stream that XRANGE can fetch} {
+ r XADD mystream * item 1 value a
+ r XADD mystream * item 2 value b
+ assert_equal 2 [r XLEN mystream]
+ set items [r XRANGE mystream - +]
+ assert_equal [lindex $items 0 1] {item 1 value a}
+ assert_equal [lindex $items 1 1] {item 2 value b}
+ }
+
+ test {XADD IDs are incremental} {
+ set id1 [r XADD mystream * item 1 value a]
+ set id2 [r XADD mystream * item 2 value b]
+ set id3 [r XADD mystream * item 3 value c]
+ assert {[streamCompareID $id1 $id2] == -1}
+ assert {[streamCompareID $id2 $id3] == -1}
+ }
+
+ test {XADD IDs are incremental when ms is the same as well} {
+ r multi
+ r XADD mystream * item 1 value a
+ r XADD mystream * item 2 value b
+ r XADD mystream * item 3 value c
+ lassign [r exec] id1 id2 id3
+ assert {[streamCompareID $id1 $id2] == -1}
+ assert {[streamCompareID $id2 $id3] == -1}
+ }
+
+ test {XADD IDs correctly report an error when overflowing} {
+ r DEL mystream
+ r xadd mystream 18446744073709551615-18446744073709551615 a b
+ assert_error ERR* {r xadd mystream * c d}
+ }
+
+ test {XADD auto-generated sequence is incremented for last ID} {
+ r DEL mystream
+ set id1 [r XADD mystream 123-456 item 1 value a]
+ set id2 [r XADD mystream 123-* item 2 value b]
+ lassign [split $id2 -] _ seq
+ assert {$seq == 457}
+ assert {[streamCompareID $id1 $id2] == -1}
+ }
+
+ test {XADD auto-generated sequence is zero for future timestamp ID} {
+ r DEL mystream
+ set id1 [r XADD mystream 123-456 item 1 value a]
+ set id2 [r XADD mystream 789-* item 2 value b]
+ lassign [split $id2 -] _ seq
+ assert {$seq == 0}
+ assert {[streamCompareID $id1 $id2] == -1}
+ }
+
+ test {XADD auto-generated sequence can't be smaller than last ID} {
+ r DEL mystream
+ r XADD mystream 123-456 item 1 value a
+ assert_error ERR* {r XADD mystream 42-* item 2 value b}
+ }
+
+ test {XADD auto-generated sequence can't overflow} {
+ r DEL mystream
+ r xadd mystream 1-18446744073709551615 a b
+ assert_error ERR* {r xadd mystream 1-* c d}
+ }
+
+ test {XADD 0-* should succeed} {
+ r DEL mystream
+ set id [r xadd mystream 0-* a b]
+ lassign [split $id -] _ seq
+ assert {$seq == 1}
+ }
+
+ test {XADD with MAXLEN option} {
+ r DEL mystream
+ for {set j 0} {$j < 1000} {incr j} {
+ if {rand() < 0.9} {
+ r XADD mystream MAXLEN 5 * xitem $j
+ } else {
+ r XADD mystream MAXLEN 5 * yitem $j
+ }
+ }
+ assert {[r xlen mystream] == 5}
+ set res [r xrange mystream - +]
+ set expected 995
+ foreach r $res {
+ assert {[lindex $r 1 1] == $expected}
+ incr expected
+ }
+ }
+
+ test {XADD with MAXLEN option and the '=' argument} {
+ r DEL mystream
+ for {set j 0} {$j < 1000} {incr j} {
+ if {rand() < 0.9} {
+ r XADD mystream MAXLEN = 5 * xitem $j
+ } else {
+ r XADD mystream MAXLEN = 5 * yitem $j
+ }
+ }
+ assert {[r XLEN mystream] == 5}
+ }
+
+ test {XADD with MAXLEN option and the '~' argument} {
+ r DEL mystream
+ r config set stream-node-max-entries 100
+ for {set j 0} {$j < 1000} {incr j} {
+ if {rand() < 0.9} {
+ r XADD mystream MAXLEN ~ 555 * xitem $j
+ } else {
+ r XADD mystream MAXLEN ~ 555 * yitem $j
+ }
+ }
+ assert {[r XLEN mystream] == 600}
+ }
+
+ test {XADD with NOMKSTREAM option} {
+ r DEL mystream
+ assert_equal "" [r XADD mystream NOMKSTREAM * item 1 value a]
+ assert_equal 0 [r EXISTS mystream]
+ r XADD mystream * item 1 value a
+ r XADD mystream NOMKSTREAM * item 2 value b
+ assert_equal 2 [r XLEN mystream]
+ set items [r XRANGE mystream - +]
+ assert_equal [lindex $items 0 1] {item 1 value a}
+ assert_equal [lindex $items 1 1] {item 2 value b}
+ }
+
+ test {XADD with MINID option} {
+ r DEL mystream
+ for {set j 1} {$j < 1001} {incr j} {
+ set minid 1000
+ if {$j >= 5} {
+ set minid [expr {$j-5}]
+ }
+ if {rand() < 0.9} {
+ r XADD mystream MINID $minid $j xitem $j
+ } else {
+ r XADD mystream MINID $minid $j yitem $j
+ }
+ }
+ assert {[r xlen mystream] == 6}
+ set res [r xrange mystream - +]
+ set expected 995
+ foreach r $res {
+ assert {[lindex $r 1 1] == $expected}
+ incr expected
+ }
+ }
+
+ test {XTRIM with MINID option} {
+ r DEL mystream
+ r XADD mystream 1-0 f v
+ r XADD mystream 2-0 f v
+ r XADD mystream 3-0 f v
+ r XADD mystream 4-0 f v
+ r XADD mystream 5-0 f v
+ r XTRIM mystream MINID = 3-0
+ assert_equal [r XRANGE mystream - +] {{3-0 {f v}} {4-0 {f v}} {5-0 {f v}}}
+ }
+
+ test {XTRIM with MINID option, big delta from master record} {
+ r DEL mystream
+ r XADD mystream 1-0 f v
+ r XADD mystream 1641544570597-0 f v
+ r XADD mystream 1641544570597-1 f v
+ r XTRIM mystream MINID 1641544570597-0
+ assert_equal [r XRANGE mystream - +] {{1641544570597-0 {f v}} {1641544570597-1 {f v}}}
+ }
+
+ proc insert_into_stream_key {key {count 10000}} {
+ r multi
+ for {set j 0} {$j < $count} {incr j} {
+            # From time to time insert an entry with a different set
+            # of fields in order to stress the stream compression code.
+ if {rand() < 0.9} {
+ r XADD $key * item $j
+ } else {
+ r XADD $key * item $j otherfield foo
+ }
+ }
+ r exec
+ }
+
+ test {XADD mass insertion and XLEN} {
+ r DEL mystream
+ insert_into_stream_key mystream
+
+ set items [r XRANGE mystream - +]
+ for {set j 0} {$j < 10000} {incr j} {
+ assert {[lrange [lindex $items $j 1] 0 1] eq [list item $j]}
+ }
+ assert {[r xlen mystream] == $j}
+ }
+
+ test {XADD with ID 0-0} {
+ r DEL otherstream
+ catch {r XADD otherstream 0-0 k v} err
+ assert {[r EXISTS otherstream] == 0}
+ }
+
+    test {XADD with LIMIT deletes no more entries than the limit} {
+ r del yourstream
+ for {set j 0} {$j < 3} {incr j} {
+ r XADD yourstream * xitem v
+ }
+ r XADD yourstream MAXLEN ~ 0 limit 1 * xitem v
+ assert {[r XLEN yourstream] == 4}
+ }
+
+ test {XRANGE COUNT works as expected} {
+ assert {[llength [r xrange mystream - + COUNT 10]] == 10}
+ }
+
+ test {XREVRANGE COUNT works as expected} {
+ assert {[llength [r xrevrange mystream + - COUNT 10]] == 10}
+ }
+
+ test {XRANGE can be used to iterate the whole stream} {
+ set last_id "-"
+ set j 0
+ while 1 {
+ set elements [r xrange mystream $last_id + COUNT 100]
+ if {[llength $elements] == 0} break
+ foreach e $elements {
+ assert {[lrange [lindex $e 1] 0 1] eq [list item $j]}
+                incr j
+ }
+ set last_id [streamNextID [lindex $elements end 0]]
+ }
+ assert {$j == 10000}
+ }
+
+ test {XREVRANGE returns the reverse of XRANGE} {
+ assert {[r xrange mystream - +] == [lreverse [r xrevrange mystream + -]]}
+ }
+
+ test {XRANGE exclusive ranges} {
+ set ids {0-1 0-18446744073709551615 1-0 42-0 42-42
+ 18446744073709551615-18446744073709551614
+ 18446744073709551615-18446744073709551615}
+ set total [llength $ids]
+ r multi
+ r DEL vipstream
+ foreach id $ids {
+ r XADD vipstream $id foo bar
+ }
+ r exec
+ assert {[llength [r xrange vipstream - +]] == $total}
+ assert {[llength [r xrange vipstream ([lindex $ids 0] +]] == $total-1}
+ assert {[llength [r xrange vipstream - ([lindex $ids $total-1]]] == $total-1}
+ assert {[llength [r xrange vipstream (0-1 (1-0]] == 1}
+ assert {[llength [r xrange vipstream (1-0 (42-42]] == 1}
+ catch {r xrange vipstream (- +} e
+ assert_match {ERR*} $e
+ catch {r xrange vipstream - (+} e
+ assert_match {ERR*} $e
+ catch {r xrange vipstream (18446744073709551615-18446744073709551615 +} e
+ assert_match {ERR*} $e
+ catch {r xrange vipstream - (0-0} e
+ assert_match {ERR*} $e
+ }
+
+ test {XREAD with non empty stream} {
+ set res [r XREAD COUNT 1 STREAMS mystream 0-0]
+ assert {[lrange [lindex $res 0 1 0 1] 0 1] eq {item 0}}
+ }
+
+ test {Non blocking XREAD with empty streams} {
+ set res [r XREAD STREAMS s1{t} s2{t} 0-0 0-0]
+ assert {$res eq {}}
+ }
+
+ test {XREAD with non empty second stream} {
+ insert_into_stream_key mystream{t}
+ set res [r XREAD COUNT 1 STREAMS nostream{t} mystream{t} 0-0 0-0]
+ assert {[lindex $res 0 0] eq {mystream{t}}}
+ assert {[lrange [lindex $res 0 1 0 1] 0 1] eq {item 0}}
+ }
+
+ test {Blocking XREAD waiting new data} {
+ r XADD s2{t} * old abcd1234
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 20000 STREAMS s1{t} s2{t} s3{t} $ $ $
+ wait_for_blocked_client
+ r XADD s2{t} * new abcd1234
+ set res [$rd read]
+ assert {[lindex $res 0 0] eq {s2{t}}}
+ assert {[lindex $res 0 1 0 1] eq {new abcd1234}}
+ $rd close
+ }
+
+ test {Blocking XREAD waiting old data} {
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 20000 STREAMS s1{t} s2{t} s3{t} $ 0-0 $
+ r XADD s2{t} * foo abcd1234
+ set res [$rd read]
+ assert {[lindex $res 0 0] eq {s2{t}}}
+ assert {[lindex $res 0 1 0 1] eq {old abcd1234}}
+ $rd close
+ }
+
+ test {Blocking XREAD will not reply with an empty array} {
+ r del s1
+ r XADD s1 666 f v
+ r XADD s1 667 f2 v2
+ r XDEL s1 667
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 10 STREAMS s1 666
+ after 20
+ assert {[$rd read] == {}} ;# before the fix, client didn't even block, but was served synchronously with {s1 {}}
+ $rd close
+ }
+
+ test "Blocking XREAD for stream that ran dry (issue #5299)" {
+ set rd [redis_deferring_client]
+
+        # Add an entry, then delete it; now the stream's last_id is 666.
+ r DEL mystream
+ r XADD mystream 666 key value
+ r XDEL mystream 666
+
+        # Pass an ID smaller than the stream's last_id; released on timeout.
+ $rd XREAD BLOCK 10 STREAMS mystream 665
+ assert_equal [$rd read] {}
+
+        # XADD throws an error if the ID is equal to or smaller than last_id.
+ assert_error ERR*equal*smaller* {r XADD mystream 665 key value}
+ assert_error ERR*equal*smaller* {r XADD mystream 666 key value}
+
+        # Enter the blocking state, then get released by the new entry.
+ $rd XREAD BLOCK 0 STREAMS mystream 665
+ wait_for_blocked_clients_count 1
+ r XADD mystream 667 key value
+ assert_equal [$rd read] {{mystream {{667-0 {key value}}}}}
+
+ $rd close
+ }
+
+ test "XREAD: XADD + DEL should not awake client" {
+ set rd [redis_deferring_client]
+ r del s1
+ $rd XREAD BLOCK 20000 STREAMS s1 $
+ wait_for_blocked_clients_count 1
+ r multi
+ r XADD s1 * old abcd1234
+ r DEL s1
+ r exec
+ r XADD s1 * new abcd1234
+ set res [$rd read]
+ assert {[lindex $res 0 0] eq {s1}}
+ assert {[lindex $res 0 1 0 1] eq {new abcd1234}}
+ $rd close
+ }
+
+ test "XREAD: XADD + DEL + LPUSH should not awake client" {
+ set rd [redis_deferring_client]
+ r del s1
+ $rd XREAD BLOCK 20000 STREAMS s1 $
+ wait_for_blocked_clients_count 1
+ r multi
+ r XADD s1 * old abcd1234
+ r DEL s1
+ r LPUSH s1 foo bar
+ r exec
+ r DEL s1
+ r XADD s1 * new abcd1234
+ set res [$rd read]
+ assert {[lindex $res 0 0] eq {s1}}
+ assert {[lindex $res 0 1 0 1] eq {new abcd1234}}
+ $rd close
+ }
+
+ test {XREAD with same stream name multiple times should work} {
+ r XADD s2 * old abcd1234
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 20000 STREAMS s2 s2 s2 $ $ $
+ wait_for_blocked_clients_count 1
+ r XADD s2 * new abcd1234
+ set res [$rd read]
+ assert {[lindex $res 0 0] eq {s2}}
+ assert {[lindex $res 0 1 0 1] eq {new abcd1234}}
+ $rd close
+ }
+
+ test {XREAD + multiple XADD inside transaction} {
+ r XADD s2 * old abcd1234
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 20000 STREAMS s2 s2 s2 $ $ $
+ wait_for_blocked_clients_count 1
+ r MULTI
+ r XADD s2 * field one
+ r XADD s2 * field two
+ r XADD s2 * field three
+ r EXEC
+ set res [$rd read]
+ assert {[lindex $res 0 0] eq {s2}}
+ assert {[lindex $res 0 1 0 1] eq {field one}}
+ assert {[lindex $res 0 1 1 1] eq {field two}}
+ $rd close
+ }
+
+ test {XDEL basic test} {
+ r del somestream
+ r xadd somestream * foo value0
+ set id [r xadd somestream * foo value1]
+ r xadd somestream * foo value2
+ r xdel somestream $id
+ assert {[r xlen somestream] == 2}
+ set result [r xrange somestream - +]
+ assert {[lindex $result 0 1 1] eq {value0}}
+ assert {[lindex $result 1 1 1] eq {value2}}
+ }
+
+    test {XDEL multiple IDs test} {
+ r del somestream
+ r xadd somestream 1-1 a 1
+ r xadd somestream 1-2 b 2
+ r xadd somestream 1-3 c 3
+ r xadd somestream 1-4 d 4
+ r xadd somestream 1-5 e 5
+ assert {[r xlen somestream] == 5}
+ assert {[r xdel somestream 1-1 1-4 1-5 2-1] == 3}
+ assert {[r xlen somestream] == 2}
+ set result [r xrange somestream - +]
+ assert {[dict get [lindex $result 0 1] b] eq {2}}
+ assert {[dict get [lindex $result 1 1] c] eq {3}}
+ }
+ # Here the idea is to check the consistency of the stream data structure
+ # as we remove all the elements down to zero elements.
+ test {XDEL fuzz test} {
+ r del somestream
+ set ids {}
+ set x 0; # Length of the stream
+ while 1 {
+ lappend ids [r xadd somestream * item $x]
+ incr x
+ # Add enough elements to have a few radix tree nodes inside the stream.
+ if {[dict get [r xinfo stream somestream] radix-tree-keys] > 20} break
+ }
+
+        # Now remove all the elements until we reach an empty stream,
+        # and after every deletion check that the stream is sane enough
+        # to report the right number of elements with XRANGE: this also
+        # forces accessing the whole data structure to check sanity.
+ assert {[r xlen somestream] == $x}
+
+ # Remove the elements in random order to exercise the implementation
+ # more thoroughly.
+ set ids [lshuffle $ids]
+ foreach id $ids {
+ assert {[r xdel somestream $id] == 1}
+ incr x -1
+ assert {[r xlen somestream] == $x}
+ # Calling XRANGE on every iteration would make the test too slow.
+ # Do it every 100 removals.
+ if {$x % 100 == 0} {
+ set res [r xrange somestream - +]
+ assert {[llength $res] == $x}
+ }
+ }
+ }
+
+ test {XRANGE fuzzing} {
+ set items [r XRANGE mystream{t} - +]
+ set low_id [lindex $items 0 0]
+ set high_id [lindex $items end 0]
+ for {set j 0} {$j < 100} {incr j} {
+ set start [streamRandomID $low_id $high_id]
+ set end [streamRandomID $low_id $high_id]
+ set range [r xrange mystream{t} $start $end]
+ set tcl_range [streamSimulateXRANGE $items $start $end]
+ if {$range ne $tcl_range} {
+ puts "*** WARNING *** - XRANGE fuzzing mismatch: $start - $end"
+ puts "---"
+ puts "XRANGE: '$range'"
+ puts "---"
+ puts "TCL: '$tcl_range'"
+ puts "---"
+ fail "XRANGE fuzzing failed, check logs for details"
+ }
+ }
+ }
+
+ test {XREVRANGE regression test for issue #5006} {
+ # Add non-compressed entries
+ r xadd teststream 1234567891230 key1 value1
+ r xadd teststream 1234567891240 key2 value2
+ r xadd teststream 1234567891250 key3 value3
+
+ # Add SAMEFIELD compressed entries
+ r xadd teststream2 1234567891230 key1 value1
+ r xadd teststream2 1234567891240 key1 value2
+ r xadd teststream2 1234567891250 key1 value3
+
+ assert_equal [r xrevrange teststream 1234567891245 -] {{1234567891240-0 {key2 value2}} {1234567891230-0 {key1 value1}}}
+
+ assert_equal [r xrevrange teststream2 1234567891245 -] {{1234567891240-0 {key1 value2}} {1234567891230-0 {key1 value1}}}
+ }
+
+ test {XREAD streamID edge (non-blocking)} {
+ r del x
+ r XADD x 1-1 f v
+ r XADD x 1-18446744073709551615 f v
+ r XADD x 2-1 f v
+ set res [r XREAD BLOCK 0 STREAMS x 1-18446744073709551615]
+ assert {[lindex $res 0 1 0] == {2-1 {f v}}}
+ }
+
+ test {XREAD streamID edge (blocking)} {
+ r del x
+ set rd [redis_deferring_client]
+ $rd XREAD BLOCK 0 STREAMS x 1-18446744073709551615
+ wait_for_blocked_clients_count 1
+ r XADD x 1-1 f v
+ r XADD x 1-18446744073709551615 f v
+ r XADD x 2-1 f v
+ set res [$rd read]
+ assert {[lindex $res 0 1 0] == {2-1 {f v}}}
+ $rd close
+ }
+
+ test {XADD streamID edge} {
+ r del x
+ r XADD x 2577343934890-18446744073709551615 f v ;# we need the timestamp to be in the future
+ r XADD x * f2 v2
+ assert_equal [r XRANGE x - +] {{2577343934890-18446744073709551615 {f v}} {2577343934891-0 {f2 v2}}}
+ }
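+
+ # Editorial note: the sequence part above is 2^64-1 (the maximum), so the
+ # auto-generated "*" ID cannot bump the sequence and instead advances the
+ # millisecond part, which is why the next entry is 2577343934891-0. The
+ # timestamp must be in the future so the wall clock does not win instead.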
+
+ test {XTRIM with MAXLEN option basic test} {
+ r DEL mystream
+ for {set j 0} {$j < 1000} {incr j} {
+ if {rand() < 0.9} {
+ r XADD mystream * xitem $j
+ } else {
+ r XADD mystream * yitem $j
+ }
+ }
+ r XTRIM mystream MAXLEN 666
+ assert {[r XLEN mystream] == 666}
+ r XTRIM mystream MAXLEN = 555
+ assert {[r XLEN mystream] == 555}
+ r XTRIM mystream MAXLEN ~ 444
+ assert {[r XLEN mystream] == 500}
+ r XTRIM mystream MAXLEN ~ 400
+ assert {[r XLEN mystream] == 400}
+ }
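+
+ # Editorial note: with "~" the trim is approximate: whole macro nodes
+ # (stream-node-max-entries entries each, 100 by default) are dropped only
+ # while doing so keeps the stream at or above the threshold, so
+ # "MAXLEN ~ 444" on 555 entries can legitimately leave 500 behind here.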
+
+ test {XADD with LIMIT consecutive calls} {
+ r del mystream
+ r config set stream-node-max-entries 10
+ for {set j 0} {$j < 100} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XADD mystream MAXLEN ~ 55 LIMIT 30 * xitem v
+ assert {[r xlen mystream] == 71}
+ r XADD mystream MAXLEN ~ 55 LIMIT 30 * xitem v
+ assert {[r xlen mystream] == 62}
+ r config set stream-node-max-entries 100
+ }
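+
+ # Editorial note on the arithmetic: each call first appends one entry
+ # (100 -> 101, then 71 -> 72) and then evicts whole 10-entry nodes. The
+ # first call stops at the LIMIT of 30 evicted entries (101 -> 71); the
+ # second stops after one node (72 -> 62), since dropping another would
+ # take the stream below the MAXLEN threshold of 55.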
+
+ test {XTRIM with ~ is limited} {
+ r del mystream
+ r config set stream-node-max-entries 1
+ for {set j 0} {$j < 102} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XTRIM mystream MAXLEN ~ 1
+ assert {[r xlen mystream] == 2}
+ r config set stream-node-max-entries 100
+ }
+
+ test {XTRIM without ~ is not limited} {
+ r del mystream
+ r config set stream-node-max-entries 1
+ for {set j 0} {$j < 102} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XTRIM mystream MAXLEN 1
+ assert {[r xlen mystream] == 1}
+ r config set stream-node-max-entries 100
+ }
+
+ test {XTRIM without ~ and with LIMIT} {
+ r del mystream
+ r config set stream-node-max-entries 1
+ for {set j 0} {$j < 102} {incr j} {
+ r XADD mystream * xitem v
+ }
+ assert_error ERR* {r XTRIM mystream MAXLEN 1 LIMIT 30}
+ }
+
+ test {XTRIM with LIMIT delete entries no more than limit} {
+ r del mystream
+ r config set stream-node-max-entries 2
+ for {set j 0} {$j < 3} {incr j} {
+ r XADD mystream * xitem v
+ }
+ assert {[r XTRIM mystream MAXLEN ~ 0 LIMIT 1] == 0}
+ assert {[r XTRIM mystream MAXLEN ~ 0 LIMIT 2] == 2}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes}} {
+ test {XADD with MAXLEN > xlen can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XADD mystream MAXLEN 200 * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ r debug loadaof
+ r XADD mystream * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ }
+}
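+
+# Editorial note: the AOF blocks below all follow one pattern: run the
+# trimming XADD/XTRIM, reload the AOF with DEBUG LOADAOF, add one more entry
+# and re-check the length. Surviving the round trip indicates the command was
+# propagated in a deterministic (exact) form rather than with the
+# approximate "~"/LIMIT options.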
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes}} {
+ test {XADD with MINID > lastid can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ set id [expr {$j+1}]
+ r XADD mystream $id xitem v
+ }
+ r XADD mystream MINID 1 * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ r debug loadaof
+ r XADD mystream * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes stream-node-max-entries 100}} {
+ test {XADD with ~ MAXLEN can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XADD mystream MAXLEN ~ $j * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ r config set stream-node-max-entries 1
+ r debug loadaof
+ r XADD mystream * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes stream-node-max-entries 10}} {
+ test {XADD with ~ MAXLEN and LIMIT can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XADD mystream MAXLEN ~ 55 LIMIT 30 * xitem v
+ assert {[r xlen mystream] == 71}
+ r config set stream-node-max-entries 1
+ r debug loadaof
+ r XADD mystream * xitem v
+ assert {[r xlen mystream] == 72}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes stream-node-max-entries 100}} {
+ test {XADD with ~ MINID can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ set id [expr {$j+1}]
+ r XADD mystream $id xitem v
+ }
+ r XADD mystream MINID ~ $j * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ r config set stream-node-max-entries 1
+ r debug loadaof
+ r XADD mystream * xitem v
+ incr j
+ assert {[r xlen mystream] == $j}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes stream-node-max-entries 10}} {
+ test {XADD with ~ MINID and LIMIT can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ set id [expr {$j+1}]
+ r XADD mystream $id xitem v
+ }
+ r XADD mystream MINID ~ 55 LIMIT 30 * xitem v
+ assert {[r xlen mystream] == 71}
+ r config set stream-node-max-entries 1
+ r debug loadaof
+ r XADD mystream * xitem v
+ assert {[r xlen mystream] == 72}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes stream-node-max-entries 10}} {
+ test {XTRIM with ~ MAXLEN can propagate correctly} {
+ for {set j 0} {$j < 100} {incr j} {
+ r XADD mystream * xitem v
+ }
+ r XTRIM mystream MAXLEN ~ 85
+ assert {[r xlen mystream] == 90}
+ r config set stream-node-max-entries 1
+ r debug loadaof
+ r XADD mystream * xitem v
+ incr j
+ assert {[r xlen mystream] == 91}
+ }
+}
+
+start_server {tags {"stream"}} {
+ test {XADD can CREATE an empty stream} {
+ r XADD mystream MAXLEN 0 * a b
+ assert {[dict get [r xinfo stream mystream] length] == 0}
+ }
+
+ test {XSETID can set a specific ID} {
+ r XSETID mystream "200-0"
+ set reply [r XINFO stream mystream]
+ assert_equal [dict get $reply last-generated-id] "200-0"
+ assert_equal [dict get $reply entries-added] 1
+ }
+
+ test {XSETID cannot SETID with smaller ID} {
+ r XADD mystream * a b
+ catch {r XSETID mystream "1-1"} err
+ r XADD mystream MAXLEN 0 * a b
+ set err
+ } {ERR *smaller*}
+
+ test {XSETID cannot SETID on non-existent key} {
+ catch {r XSETID stream 1-1} err
+ set _ $err
+ } {ERR no such key}
+
+ test {XSETID cannot run with an offset but without a maximal tombstone} {
+ catch {r XSETID stream 1-1 0} err
+ set _ $err
+ } {ERR syntax error}
+
+ test {XSETID cannot run with a maximal tombstone but without an offset} {
+ catch {r XSETID stream 1-1 0-0} err
+ set _ $err
+ } {ERR syntax error}
+
+ test {XSETID errors on negative offset} {
+ catch {r XSETID stream 1-1 ENTRIESADDED -1 MAXDELETEDID 0-0} err
+ set _ $err
+ } {ERR *must be positive}
+
+ test {XSETID cannot set the maximal tombstone with larger ID} {
+ r DEL x
+ r XADD x 1-0 a b
+
+ catch {r XSETID x "1-0" ENTRIESADDED 1 MAXDELETEDID "2-0" } err
+ r XADD mystream MAXLEN 0 * a b
+ set err
+ } {ERR *smaller*}
+
+ test {XSETID cannot set the offset to less than the length} {
+ r DEL x
+ r XADD x 1-0 a b
+
+ catch {r XSETID x "1-0" ENTRIESADDED 0 MAXDELETEDID "0-0" } err
+ r XADD mystream MAXLEN 0 * a b
+ set err
+ } {ERR *smaller*}
+
+ test {XSETID cannot set smaller ID than current MAXDELETEDID} {
+ r DEL x
+ r XADD x 1-1 a 1
+ r XADD x 1-2 b 2
+ r XADD x 1-3 c 3
+ r XDEL x 1-2
+ r XDEL x 1-3
+ set reply [r XINFO stream x]
+ assert_equal [dict get $reply max-deleted-entry-id] "1-3"
+ catch {r XSETID x "1-2" } err
+ set err
+ } {ERR *smaller*}
+}
+
+start_server {tags {"stream"}} {
+ test {XADD advances the entries-added counter and sets the recorded-first-entry-id} {
+ r DEL x
+ r XADD x 1-0 data a
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply entries-added] 1
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+
+ r XADD x 2-0 data a
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply entries-added] 2
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+ }
+
+ test {XDEL/TRIM are reflected by recorded first entry} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data a
+ r XADD x 3-0 data a
+ r XADD x 4-0 data a
+ r XADD x 5-0 data a
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply entries-added] 5
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+
+ r XDEL x 2-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+
+ r XDEL x 1-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply recorded-first-entry-id] "3-0"
+
+ r XTRIM x MAXLEN = 2
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply recorded-first-entry-id] "4-0"
+ }
+
+ test {Maximum XDEL ID behaves correctly} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data b
+ r XADD x 3-0 data c
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "0-0"
+
+ r XDEL x 2-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "2-0"
+
+ r XDEL x 1-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "2-0"
+ }
+
+ test {XADD with partial ID with maximal seq} {
+ r DEL x
+ r XADD x 1-18446744073709551615 f1 v1
+ assert_error {*The ID specified in XADD is equal or smaller*} {r XADD x 1-* f2 v2}
+ }
+}
+
+start_server {tags {"stream needs:debug"} overrides {appendonly yes aof-use-rdb-preamble no}} {
+ test {Empty stream can be rewritten into AOF correctly} {
+ r XADD mystream MAXLEN 0 * a b
+ assert {[dict get [r xinfo stream mystream] length] == 0}
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ assert {[dict get [r xinfo stream mystream] length] == 0}
+ }
+
+ test {Stream can be rewritten into AOF correctly after XDEL lastid} {
+ r XSETID mystream 0-0
+ r XADD mystream 1-1 a b
+ r XADD mystream 2-2 a b
+ assert {[dict get [r xinfo stream mystream] length] == 2}
+ r XDEL mystream 2-2
+ r bgrewriteaof
+ waitForBgrewriteaof r
+ r debug loadaof
+ assert {[dict get [r xinfo stream mystream] length] == 1}
+ assert_equal [dict get [r xinfo stream mystream] last-generated-id] "2-2"
+ }
+}
+
+start_server {tags {"stream"}} {
+ test {XGROUP HELP should not have unexpected options} {
+ catch {r XGROUP help xxx} e
+ assert_match "*wrong number of arguments for 'xgroup|help' command" $e
+ }
+
+ test {XINFO HELP should not have unexpected options} {
+ catch {r XINFO help xxx} e
+ assert_match "*wrong number of arguments for 'xinfo|help' command" $e
+ }
+}
diff --git a/tests/unit/type/string.tcl b/tests/unit/type/string.tcl
new file mode 100644
index 0000000..94702ec
--- /dev/null
+++ b/tests/unit/type/string.tcl
@@ -0,0 +1,674 @@
+start_server {tags {"string"}} {
+ test {SET and GET an item} {
+ r set x foobar
+ r get x
+ } {foobar}
+
+ test {SET and GET an empty item} {
+ r set x {}
+ r get x
+ } {}
+
+ test {Very big payload in GET/SET} {
+ set buf [string repeat "abcd" 1000000]
+ r set foo $buf
+ r get foo
+ } [string repeat "abcd" 1000000]
+
+ tags {"slow"} {
+ test {Very big payload random access} {
+ set err {}
+ array set payload {}
+ for {set j 0} {$j < 100} {incr j} {
+ set size [expr 1+[randomInt 100000]]
+ set buf [string repeat "pl-$j" $size]
+ set payload($j) $buf
+ r set bigpayload_$j $buf
+ }
+ for {set j 0} {$j < 1000} {incr j} {
+ set index [randomInt 100]
+ set buf [r get bigpayload_$index]
+ if {$buf != $payload($index)} {
+ set err "Values differ: I set '$payload($index)' but I read back '$buf'"
+ break
+ }
+ }
+ unset payload
+ set _ $err
+ } {}
+
+ test {SET 10000 numeric keys and access all of them in reverse order} {
+ r flushdb
+ set err {}
+ for {set x 0} {$x < 10000} {incr x} {
+ r set $x $x
+ }
+ set sum 0
+ for {set x 9999} {$x >= 0} {incr x -1} {
+ set val [r get $x]
+ if {$val ne $x} {
+ set err "Element at position $x is $val instead of $x"
+ break
+ }
+ }
+ set _ $err
+ } {}
+
+ test {DBSIZE should be 10000 now} {
+ r dbsize
+ } {10000}
+ }
+
+ test "SETNX target key missing" {
+ r del novar
+ assert_equal 1 [r setnx novar foobared]
+ assert_equal "foobared" [r get novar]
+ }
+
+ test "SETNX target key exists" {
+ r set novar foobared
+ assert_equal 0 [r setnx novar blabla]
+ assert_equal "foobared" [r get novar]
+ }
+
+ test "SETNX against not-expired volatile key" {
+ r set x 10
+ r expire x 10000
+ assert_equal 0 [r setnx x 20]
+ assert_equal 10 [r get x]
+ }
+
+ test "SETNX against expired volatile key" {
+ # Make it very unlikely for the key this test uses to be expired by the
+ # active expiry cycle. This is tightly coupled to the implementation of
+ # active expiry and dbAdd(), but it is currently the only way to test
+ # that SETNX treats a key as expired when it should be.
+ for {set x 0} {$x < 9999} {incr x} {
+ r setex key-$x 3600 value
+ }
+
+ # This will be one of 10000 expiring keys. An active expiry cycle runs
+ # every 100ms and samples 10 keys per cycle. Waiting 2s leaves the key
+ # expired-but-present for at most 1s, i.e. at most 10 cycles and a total
+ # sample of 100 keys, so the probability of this test passing as a
+ # false positive is approx. 1%.
+ r set x 10
+ r expire x 1
+
+ # Wait for the key to expire
+ after 2000
+
+ assert_equal 1 [r setnx x 20]
+ assert_equal 20 [r get x]
+ }
+
+ test "GETEX EX option" {
+ r del foo
+ r set foo bar
+ r getex foo ex 10
+ assert_range [r ttl foo] 5 10
+ }
+
+ test "GETEX PX option" {
+ r del foo
+ r set foo bar
+ r getex foo px 10000
+ assert_range [r pttl foo] 5000 10000
+ }
+
+ test "GETEX EXAT option" {
+ r del foo
+ r set foo bar
+ r getex foo exat [expr [clock seconds] + 10]
+ assert_range [r ttl foo] 5 10
+ }
+
+ test "GETEX PXAT option" {
+ r del foo
+ r set foo bar
+ r getex foo pxat [expr [clock milliseconds] + 10000]
+ assert_range [r pttl foo] 5000 10000
+ }
+
+ test "GETEX PERSIST option" {
+ r del foo
+ r set foo bar ex 10
+ assert_range [r ttl foo] 5 10
+ r getex foo persist
+ assert_equal -1 [r ttl foo]
+ }
+
+ test "GETEX no option" {
+ r del foo
+ r set foo bar
+ r getex foo
+ assert_equal bar [r getex foo]
+ }
+
+ test "GETEX syntax errors" {
+ set ex {}
+ catch {r getex foo non-existent-option} ex
+ set ex
+ } {*syntax*}
+
+ test "GETEX and GET expired key or not exist" {
+ r del foo
+ r set foo bar px 1
+ after 2
+ assert_equal {} [r getex foo]
+ assert_equal {} [r get foo]
+ }
+
+ test "GETEX no arguments" {
+ set ex {}
+ catch {r getex} ex
+ set ex
+ } {*wrong number of arguments for 'getex' command}
+
+ test "GETDEL command" {
+ r del foo
+ r set foo bar
+ assert_equal bar [r getdel foo ]
+ assert_equal {} [r getdel foo ]
+ }
+
+ test {GETDEL propagates as DEL command to replica} {
+ set repl [attach_to_replication_stream]
+ r set foo bar
+ r getdel foo
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ {del foo}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {GETEX without argument does not propagate to replica} {
+ set repl [attach_to_replication_stream]
+ r set foo bar
+ r getex foo
+ r del foo
+ assert_replication_stream $repl {
+ {select *}
+ {set foo bar}
+ {del foo}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test {MGET} {
+ r flushdb
+ r set foo{t} BAR
+ r set bar{t} FOO
+ r mget foo{t} bar{t}
+ } {BAR FOO}
+
+ test {MGET against non existing key} {
+ r mget foo{t} baazz{t} bar{t}
+ } {BAR {} FOO}
+
+ test {MGET against non-string key} {
+ r sadd myset{t} ciao
+ r sadd myset{t} bau
+ r mget foo{t} baazz{t} bar{t} myset{t}
+ } {BAR {} FOO {}}
+
+ test {GETSET (set new value)} {
+ r del foo
+ list [r getset foo xyz] [r get foo]
+ } {{} xyz}
+
+ test {GETSET (replace old value)} {
+ r set foo bar
+ list [r getset foo xyz] [r get foo]
+ } {bar xyz}
+
+ test {MSET base case} {
+ r mset x{t} 10 y{t} "foo bar" z{t} "x x x x x x x\n\n\r\n"
+ r mget x{t} y{t} z{t}
+ } [list 10 {foo bar} "x x x x x x x\n\n\r\n"]
+
+ test {MSET/MSETNX wrong number of args} {
+ assert_error {*wrong number of arguments for 'mset' command} {r mset x{t} 10 y{t} "foo bar" z{t}}
+ assert_error {*wrong number of arguments for 'msetnx' command} {r msetnx x{t} 20 y{t} "foo bar" z{t}}
+ }
+
+ test {MSET with already existing - same key twice} {
+ r set x{t} x
+ list [r mset x{t} xxx x{t} yyy] [r get x{t}]
+ } {OK yyy}
+
+ test {MSETNX with already existent key} {
+ list [r msetnx x1{t} xxx y2{t} yyy x{t} 20] [r exists x1{t}] [r exists y2{t}]
+ } {0 0 0}
+
+ test {MSETNX with not existing keys} {
+ list [r msetnx x1{t} xxx y2{t} yyy] [r get x1{t}] [r get y2{t}]
+ } {1 xxx yyy}
+
+ test {MSETNX with not existing keys - same key twice} {
+ r del x1{t}
+ list [r msetnx x1{t} xxx x1{t} yyy] [r get x1{t}]
+ } {1 yyy}
+
+ test {MSETNX with already existing keys - same key twice} {
+ list [r msetnx x1{t} xxx x1{t} zzz] [r get x1{t}]
+ } {0 yyy}
+
+ test "STRLEN against non-existing key" {
+ assert_equal 0 [r strlen notakey]
+ }
+
+ test "STRLEN against integer-encoded value" {
+ r set myinteger -555
+ assert_equal 4 [r strlen myinteger]
+ }
+
+ test "STRLEN against plain string" {
+ r set mystring "foozzz0123456789 baz"
+ assert_equal 20 [r strlen mystring]
+ }
+
+ test "SETBIT against non-existing key" {
+ r del mykey
+ assert_equal 0 [r setbit mykey 1 1]
+ assert_equal [binary format B* 01000000] [r get mykey]
+ }
+
+ test "SETBIT against string-encoded key" {
+ # Ascii "@" is integer 64 = 01 00 00 00
+ r set mykey "@"
+
+ assert_equal 0 [r setbit mykey 2 1]
+ assert_equal [binary format B* 01100000] [r get mykey]
+ assert_equal 1 [r setbit mykey 1 0]
+ assert_equal [binary format B* 00100000] [r get mykey]
+ }
+
+ test "SETBIT against integer-encoded key" {
+ # Ascii "1" is integer 49 = 00 11 00 01
+ r set mykey 1
+ assert_encoding int mykey
+
+ assert_equal 0 [r setbit mykey 6 1]
+ assert_equal [binary format B* 00110011] [r get mykey]
+ assert_equal 1 [r setbit mykey 2 0]
+ assert_equal [binary format B* 00010011] [r get mykey]
+ }
+
+ test "SETBIT against key with wrong type" {
+ r del mykey
+ r lpush mykey "foo"
+ assert_error "WRONGTYPE*" {r setbit mykey 0 1}
+ }
+
+ test "SETBIT with out of range bit offset" {
+ r del mykey
+ assert_error "*out of range*" {r setbit mykey [expr 4*1024*1024*1024] 1}
+ assert_error "*out of range*" {r setbit mykey -1 1}
+ }
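+
+ # Editorial note: string values are capped at 512MB, i.e. 2^32 bits, so
+ # any bit offset of 4*1024*1024*1024 or more must be rejected.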
+
+ test "SETBIT with non-bit argument" {
+ r del mykey
+ assert_error "*out of range*" {r setbit mykey 0 -1}
+ assert_error "*out of range*" {r setbit mykey 0 2}
+ assert_error "*out of range*" {r setbit mykey 0 10}
+ assert_error "*out of range*" {r setbit mykey 0 20}
+ }
+
+ test "SETBIT fuzzing" {
+ set str ""
+ set len [expr 256*8]
+ r del mykey
+
+ for {set i 0} {$i < 2000} {incr i} {
+ set bitnum [randomInt $len]
+ set bitval [randomInt 2]
+ # Mirror the change in a local string of "0"/"1" chars: left-pad the
+ # head out to $bitnum characters, drop in the new bit, append the
+ # tail, then turn the padding spaces into "0" bits.
+ set fmt [format "%%-%ds%%d%%-s" $bitnum]
+ set head [string range $str 0 $bitnum-1]
+ set tail [string range $str $bitnum+1 end]
+ set str [string map {" " 0} [format $fmt $head $bitval $tail]]
+
+ r setbit mykey $bitnum $bitval
+ assert_equal [binary format B* $str] [r get mykey]
+ }
+ }
+
+ test "GETBIT against non-existing key" {
+ r del mykey
+ assert_equal 0 [r getbit mykey 0]
+ }
+
+ test "GETBIT against string-encoded key" {
+ # Single byte with 2nd and 3rd bit set
+ r set mykey "`"
+
+ # In-range
+ assert_equal 0 [r getbit mykey 0]
+ assert_equal 1 [r getbit mykey 1]
+ assert_equal 1 [r getbit mykey 2]
+ assert_equal 0 [r getbit mykey 3]
+
+ # Out-range
+ assert_equal 0 [r getbit mykey 8]
+ assert_equal 0 [r getbit mykey 100]
+ assert_equal 0 [r getbit mykey 10000]
+ }
+
+ test "GETBIT against integer-encoded key" {
+ r set mykey 1
+ assert_encoding int mykey
+
+ # Ascii "1" is integer 49 = 00 11 00 01
+ assert_equal 0 [r getbit mykey 0]
+ assert_equal 0 [r getbit mykey 1]
+ assert_equal 1 [r getbit mykey 2]
+ assert_equal 1 [r getbit mykey 3]
+
+ # Out-range
+ assert_equal 0 [r getbit mykey 8]
+ assert_equal 0 [r getbit mykey 100]
+ assert_equal 0 [r getbit mykey 10000]
+ }
+
+ test "SETRANGE against non-existing key" {
+ r del mykey
+ assert_equal 3 [r setrange mykey 0 foo]
+ assert_equal "foo" [r get mykey]
+
+ r del mykey
+ assert_equal 0 [r setrange mykey 0 ""]
+ assert_equal 0 [r exists mykey]
+
+ r del mykey
+ assert_equal 4 [r setrange mykey 1 foo]
+ assert_equal "\000foo" [r get mykey]
+ }
+
+ test "SETRANGE against string-encoded key" {
+ r set mykey "foo"
+ assert_equal 3 [r setrange mykey 0 b]
+ assert_equal "boo" [r get mykey]
+
+ r set mykey "foo"
+ assert_equal 3 [r setrange mykey 0 ""]
+ assert_equal "foo" [r get mykey]
+
+ r set mykey "foo"
+ assert_equal 3 [r setrange mykey 1 b]
+ assert_equal "fbo" [r get mykey]
+
+ r set mykey "foo"
+ assert_equal 7 [r setrange mykey 4 bar]
+ assert_equal "foo\000bar" [r get mykey]
+ }
+
+ test "SETRANGE against integer-encoded key" {
+ r set mykey 1234
+ assert_encoding int mykey
+ assert_equal 4 [r setrange mykey 0 2]
+ assert_encoding raw mykey
+ assert_equal 2234 [r get mykey]
+
+ # Shouldn't change encoding when nothing is set
+ r set mykey 1234
+ assert_encoding int mykey
+ assert_equal 4 [r setrange mykey 0 ""]
+ assert_encoding int mykey
+ assert_equal 1234 [r get mykey]
+
+ r set mykey 1234
+ assert_encoding int mykey
+ assert_equal 4 [r setrange mykey 1 3]
+ assert_encoding raw mykey
+ assert_equal 1334 [r get mykey]
+
+ r set mykey 1234
+ assert_encoding int mykey
+ assert_equal 6 [r setrange mykey 5 2]
+ assert_encoding raw mykey
+ assert_equal "1234\0002" [r get mykey]
+ }
+
+ test "SETRANGE against key with wrong type" {
+ r del mykey
+ r lpush mykey "foo"
+ assert_error "WRONGTYPE*" {r setrange mykey 0 bar}
+ }
+
+ test "SETRANGE with out of range offset" {
+ r del mykey
+ assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world}
+
+ r set mykey "hello"
+ assert_error "*out of range*" {r setrange mykey -1 world}
+ assert_error "*maximum allowed size*" {r setrange mykey [expr 512*1024*1024-4] world}
+ }
+
+ test "GETRANGE against non-existing key" {
+ r del mykey
+ assert_equal "" [r getrange mykey 0 -1]
+ }
+
+ test "GETRANGE against wrong key type" {
+ r lpush lkey1 "list"
+ assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r getrange lkey1 0 -1}
+ }
+
+ test "GETRANGE against string value" {
+ r set mykey "Hello World"
+ assert_equal "Hell" [r getrange mykey 0 3]
+ assert_equal "Hello World" [r getrange mykey 0 -1]
+ assert_equal "orld" [r getrange mykey -4 -1]
+ assert_equal "" [r getrange mykey 5 3]
+ assert_equal " World" [r getrange mykey 5 5000]
+ assert_equal "Hello World" [r getrange mykey -5000 10000]
+ }
+
+ test "GETRANGE against integer-encoded value" {
+ r set mykey 1234
+ assert_equal "123" [r getrange mykey 0 2]
+ assert_equal "1234" [r getrange mykey 0 -1]
+ assert_equal "234" [r getrange mykey -3 -1]
+ assert_equal "" [r getrange mykey 5 3]
+ assert_equal "4" [r getrange mykey 3 5000]
+ assert_equal "1234" [r getrange mykey -5000 10000]
+ }
+
+ test "GETRANGE fuzzing" {
+ for {set i 0} {$i < 1000} {incr i} {
+ r set bin [set bin [randstring 0 1024 binary]]
+ set _start [set start [randomInt 1500]]
+ set _end [set end [randomInt 1500]]
+ if {$_start < 0} {set _start "end-[abs($_start)-1]"}
+ if {$_end < 0} {set _end "end-[abs($_end)-1]"}
+ assert_equal [string range $bin $_start $_end] [r getrange bin $start $end]
+ }
+ }
+
+ test "Coverage: SUBSTR" {
+ r set key abcde
+ assert_equal "a" [r substr key 0 0]
+ assert_equal "abcd" [r substr key 0 3]
+ assert_equal "bcde" [r substr key -4 -1]
+ assert_equal "" [r substr key -1 -3]
+ assert_equal "" [r substr key 7 8]
+ assert_equal "" [r substr nokey 0 1]
+ }
+
+ if {[string match {*jemalloc*} [s mem_allocator]]} {
+ test {trim on SET with big value} {
+ # set a big value to trigger increasing the query buf
+ r set key [string repeat A 100000]
+ # set a smaller value, still > PROTO_MBULK_BIG_ARG (32*1024), so Redis
+ # will try to reuse the query buf itself as the value stored in the DB.
+ r set key [string repeat A 33000]
+ # assert the value was trimmed
+ assert {[r memory usage key] < 42000}; # 42K to account for Jemalloc's additional memory overhead.
+ }
+ } ;# if jemalloc
+
+ test {Extended SET can detect syntax errors} {
+ set e {}
+ catch {r set foo bar non-existing-option} e
+ set e
+ } {*syntax*}
+
+ test {Extended SET NX option} {
+ r del foo
+ set v1 [r set foo 1 nx]
+ set v2 [r set foo 2 nx]
+ list $v1 $v2 [r get foo]
+ } {OK {} 1}
+
+ test {Extended SET XX option} {
+ r del foo
+ set v1 [r set foo 1 xx]
+ r set foo bar
+ set v2 [r set foo 2 xx]
+ list $v1 $v2 [r get foo]
+ } {{} OK 2}
+
+ test {Extended SET GET option} {
+ r del foo
+ r set foo bar
+ set old_value [r set foo bar2 GET]
+ set new_value [r get foo]
+ list $old_value $new_value
+ } {bar bar2}
+
+ test {Extended SET GET option with no previous value} {
+ r del foo
+ set old_value [r set foo bar GET]
+ set new_value [r get foo]
+ list $old_value $new_value
+ } {{} bar}
+
+ test {Extended SET GET option with XX} {
+ r del foo
+ r set foo bar
+ set old_value [r set foo baz GET XX]
+ set new_value [r get foo]
+ list $old_value $new_value
+ } {bar baz}
+
+ test {Extended SET GET option with XX and no previous value} {
+ r del foo
+ set old_value [r set foo bar GET XX]
+ set new_value [r get foo]
+ list $old_value $new_value
+ } {{} {}}
+
+ test {Extended SET GET option with NX} {
+ r del foo
+ set old_value [r set foo bar GET NX]
+ set new_value [r get foo]
+ list $old_value $new_value
+ } {{} bar}
+
+ test {Extended SET GET option with NX and previous value} {
+ r del foo
+ r set foo bar
+ set old_value [r set foo baz GET NX]
+ set new_value [r get foo]
+ list $old_value $new_value
+ } {bar bar}
+
+ test {Extended SET GET with incorrect type should result in wrong type error} {
+ r del foo
+ r rpush foo waffle
+ catch {r set foo bar GET} err1
+ assert_equal "waffle" [r rpop foo]
+ set err1
+ } {*WRONGTYPE*}
+
+ test {Extended SET EX option} {
+ r del foo
+ r set foo bar ex 10
+ set ttl [r ttl foo]
+ assert {$ttl <= 10 && $ttl > 5}
+ }
+
+ test {Extended SET PX option} {
+ r del foo
+ r set foo bar px 10000
+ set ttl [r ttl foo]
+ assert {$ttl <= 10 && $ttl > 5}
+ }
+
+ test "Extended SET EXAT option" {
+ r del foo
+ r set foo bar exat [expr [clock seconds] + 10]
+ assert_range [r ttl foo] 5 10
+ }
+
+ test "Extended SET PXAT option" {
+ r del foo
+ r set foo bar pxat [expr [clock milliseconds] + 10000]
+ assert_range [r ttl foo] 5 10
+ }
+
+ test {Extended SET using multiple options at once} {
+ r set foo val
+ assert {[r set foo bar xx px 10000] eq {OK}}
+ set ttl [r ttl foo]
+ assert {$ttl <= 10 && $ttl > 5}
+ }
+
+ test {GETRANGE with huge ranges, Github issue #1844} {
+ r set foo bar
+ r getrange foo 0 4294967297
+ } {bar}
+
+ set rna1 {CACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTTCGTCCGGGTGTG}
+ set rna2 {ATTAAAGGTTTATACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTT}
+ set rnalcs {ACCTTCCCAGGTAACAAACCAACCAACTTTCGATCTCTTGTAGATCTGTTCTCTAAACGAACTTTAAAATCTGTGTGGCTGTCACTCGGCTGCATGCTTAGTGCACTCACGCAGTATAATTAATAACTAATTACTGTCGTTGACAGGACACGAGTAACTCGTCTATCTTCTGCAGGCTGCTTACGGTTTCGTCCGTGTTGCAGCCGATCATCAGCACATCTAGGTTT}
+
+ test {LCS basic} {
+ r set virus1{t} $rna1
+ r set virus2{t} $rna2
+ r LCS virus1{t} virus2{t}
+ } $rnalcs
+
+ test {LCS len} {
+ r set virus1{t} $rna1
+ r set virus2{t} $rna2
+ r LCS virus1{t} virus2{t} LEN
+ } [string length $rnalcs]
+
+ test {LCS indexes} {
+ dict get [r LCS virus1{t} virus2{t} IDX] matches
+ } {{{238 238} {239 239}} {{236 236} {238 238}} {{229 230} {236 237}} {{224 224} {235 235}} {{1 222} {13 234}}}
+
+ test {LCS indexes with match len} {
+ dict get [r LCS virus1{t} virus2{t} IDX WITHMATCHLEN] matches
+ } {{{238 238} {239 239} 1} {{236 236} {238 238} 1} {{229 230} {236 237} 2} {{224 224} {235 235} 1} {{1 222} {13 234} 222}}
+
+ test {LCS indexes with match len and minimum match len} {
+ dict get [r LCS virus1{t} virus2{t} IDX WITHMATCHLEN MINMATCHLEN 5] matches
+ } {{{1 222} {13 234} 222}}
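+
+ # Editorial note: each IDX match is a pair of {start end} ranges, the
+ # first into key A and the second into key B, reported from the end of
+ # the strings backwards; WITHMATCHLEN appends each match's length.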
+
+ test {SETRANGE with huge offset} {
+ foreach value {9223372036854775807 2147483647} {
+ catch {[r setrange K $value A]} res
+ # expecting a different error on 32 and 64 bit systems
+ if {![string match "*string exceeds maximum allowed size*" $res] && ![string match "*out of range*" $res]} {
+ assert_equal $res "expecting an error"
+ }
+ }
+ }
+
+ test {APPEND modifies the encoding from int to raw} {
+ r del foo
+ r set foo 1
+ assert_encoding "int" foo
+ r append foo 2
+
+ set res {}
+ lappend res [r get foo]
+ assert_encoding "raw" foo
+
+ r set bar 12
+ assert_encoding "int" bar
+ lappend res [r get bar]
+ } {12 12}
+}
diff --git a/tests/unit/type/zset.tcl b/tests/unit/type/zset.tcl
new file mode 100644
index 0000000..33427d8
--- /dev/null
+++ b/tests/unit/type/zset.tcl
@@ -0,0 +1,2654 @@
+start_server {tags {"zset"}} {
+ proc create_zset {key items} {
+ r del $key
+ foreach {score entry} $items {
+ r zadd $key $score $entry
+ }
+ }
+
+ # A helper function to verify either ZPOP* or ZMPOP* response.
+ proc verify_pop_response {pop res zpop_expected_response zmpop_expected_response} {
+ if {[string match "*ZM*" $pop]} {
+ assert_equal $res $zmpop_expected_response
+ } else {
+ assert_equal $res $zpop_expected_response
+ }
+ }
+
+ # A helper function to verify either ZPOP* or ZMPOP* response when given one input key.
+ proc verify_zpop_response {rd pop key count zpop_expected_response zmpop_expected_response} {
+ if {[string match "ZM*" $pop]} {
+ lassign [split $pop "_"] pop where
+
+ if {$count == 0} {
+ set res [$rd $pop 1 $key $where]
+ } else {
+ set res [$rd $pop 1 $key $where COUNT $count]
+ }
+ } else {
+ if {$count == 0} {
+ set res [$rd $pop $key]
+ } else {
+ set res [$rd $pop $key $count]
+ }
+ }
+ verify_pop_response $pop $res $zpop_expected_response $zmpop_expected_response
+ }
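+
+ # Editorial note: callers of the multi-key variants pass names such as
+ # "ZMPOP_MIN" or "BZMPOP_MAX"; the split on "_" above recovers the command
+ # name and the MIN/MAX direction, while plain ZPOPMIN/ZPOPMAX names are
+ # used verbatim.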
+
+ # A helper function to verify either BZPOP* or BZMPOP* response when given one input key.
+ proc verify_bzpop_response {rd pop key timeout count bzpop_expected_response bzmpop_expected_response} {
+ if {[string match "BZM*" $pop]} {
+ lassign [split $pop "_"] pop where
+
+ if {$count == 0} {
+ $rd $pop $timeout 1 $key $where
+ } else {
+ $rd $pop $timeout 1 $key $where COUNT $count
+ }
+ } else {
+ $rd $pop $key $timeout
+ }
+ verify_pop_response $pop [$rd read] $bzpop_expected_response $bzmpop_expected_response
+ }
+
+ # A helper function to verify either BZPOP* or BZMPOP* response when given two input keys.
+ proc verify_bzpop_two_key_response {rd pop key key2 timeout count bzpop_expected_response bzmpop_expected_response} {
+ if {[string match "BZM*" $pop]} {
+ lassign [split $pop "_"] pop where
+
+ if {$count == 0} {
+ $rd $pop $timeout 2 $key $key2 $where
+ } else {
+ $rd $pop $timeout 2 $key $key2 $where COUNT $count
+ }
+ } else {
+ $rd $pop $key $key2 $timeout
+ }
+ verify_pop_response $pop [$rd read] $bzpop_expected_response $bzmpop_expected_response
+ }
+
+ # A helper function to execute either BZPOP* or BZMPOP* with one input key.
+ proc bzpop_command {rd pop key timeout} {
+ if {[string match "BZM*" $pop]} {
+ lassign [split $pop "_"] pop where
+ $rd $pop $timeout 1 $key $where COUNT 1
+ } else {
+ $rd $pop $key $timeout
+ }
+ }
+
+ # A helper function to verify nil response in readraw base on RESP version.
+ proc verify_nil_response {resp nil_response} {
+ if {$resp == 2} {
+ assert_equal $nil_response {*-1}
+ } elseif {$resp == 3} {
+ assert_equal $nil_response {_}
+ }
+ }
+
+ # A helper function to verify a zset score response in readraw mode, based on the RESP version.
+ proc verify_score_response {rd resp score} {
+ if {$resp == 2} {
+ assert_equal [$rd read] {$1}
+ assert_equal [$rd read] $score
+ } elseif {$resp == 3} {
+ assert_equal [$rd read] ",$score"
+ }
+ }
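+
+ # Editorial note: in readraw mode a RESP2 score arrives as a bulk string
+ # (a "$<len>" header line followed by the value on its own line), while
+ # RESP3 encodes doubles inline as ",<score>"; the two branches above read
+ # whichever form the active protocol produces.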
+
+ proc basics {encoding} {
+ set original_max_entries [lindex [r config get zset-max-ziplist-entries] 1]
+ set original_max_value [lindex [r config get zset-max-ziplist-value] 1]
+ if {$encoding == "listpack"} {
+ r config set zset-max-ziplist-entries 128
+ r config set zset-max-ziplist-value 64
+ } elseif {$encoding == "skiplist"} {
+ r config set zset-max-ziplist-entries 0
+ r config set zset-max-ziplist-value 0
+ } else {
+ puts "Unknown sorted set encoding"
+ exit
+ }
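+
+ # Editorial note: zeroing both thresholds makes every new zset exceed
+ # the listpack limits immediately, so it is created directly with the
+ # skiplist encoding; the original values saved above are presumably
+ # restored once these encoding-specific tests complete.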
+
+ test "Check encoding - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x
+ assert_encoding $encoding ztmp
+ }
+
+ test "ZSET basic ZADD and score update - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x
+ r zadd ztmp 20 y
+ r zadd ztmp 30 z
+ assert_equal {x y z} [r zrange ztmp 0 -1]
+
+ r zadd ztmp 1 y
+ assert_equal {y x z} [r zrange ztmp 0 -1]
+ }
+
+ test "ZSET element can't be set to NaN with ZADD - $encoding" {
+ assert_error "*not*float*" {r zadd myzset nan abc}
+ }
+
+ test "ZSET element can't be set to NaN with ZINCRBY - $encoding" {
+ assert_error "*not*float*" {r zincrby myzset nan abc}
+ }
+
+ test "ZADD with options syntax error with incomplete pair - $encoding" {
+ r del ztmp
+ catch {r zadd ztmp xx 10 x 20} err
+ set err
+ } {ERR*}
+
+ test "ZADD XX option without key - $encoding" {
+ r del ztmp
+ assert {[r zadd ztmp xx 10 x] == 0}
+ assert {[r type ztmp] eq {none}}
+ }
+
+ test "ZADD XX existing key - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x
+ assert {[r zadd ztmp xx 20 y] == 0}
+ assert {[r zcard ztmp] == 1}
+ }
+
+ test "ZADD XX returns the number of elements actually added - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x
+ set retval [r zadd ztmp 10 x 20 y 30 z]
+ assert {$retval == 2}
+ }
+
+ test "ZADD XX updates existing elements score - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ r zadd ztmp xx 5 foo 11 x 21 y 40 zap
+ assert {[r zcard ztmp] == 3}
+ assert {[r zscore ztmp x] == 11}
+ assert {[r zscore ztmp y] == 21}
+ }
+
+ test "ZADD GT updates existing elements when new scores are greater - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ assert {[r zadd ztmp gt ch 5 foo 11 x 21 y 29 z] == 3}
+ assert {[r zcard ztmp] == 4}
+ assert {[r zscore ztmp x] == 11}
+ assert {[r zscore ztmp y] == 21}
+ assert {[r zscore ztmp z] == 30}
+ }
+
+ test "ZADD LT updates existing elements when new scores are lower - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ assert {[r zadd ztmp lt ch 5 foo 11 x 21 y 29 z] == 2}
+ assert {[r zcard ztmp] == 4}
+ assert {[r zscore ztmp x] == 10}
+ assert {[r zscore ztmp y] == 20}
+ assert {[r zscore ztmp z] == 29}
+ }
+
+ test "ZADD GT XX updates existing elements when new scores are greater and skips new elements - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ assert {[r zadd ztmp gt xx ch 5 foo 11 x 21 y 29 z] == 2}
+ assert {[r zcard ztmp] == 3}
+ assert {[r zscore ztmp x] == 11}
+ assert {[r zscore ztmp y] == 21}
+ assert {[r zscore ztmp z] == 30}
+ }
+
+ test "ZADD LT XX updates existing elements when new scores are lower and skips new elements - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ assert {[r zadd ztmp lt xx ch 5 foo 11 x 21 y 29 z] == 1}
+ assert {[r zcard ztmp] == 3}
+ assert {[r zscore ztmp x] == 10}
+ assert {[r zscore ztmp y] == 20}
+ assert {[r zscore ztmp z] == 29}
+ }
+
+ test "ZADD XX and NX are not compatible - $encoding" {
+ r del ztmp
+ catch {r zadd ztmp xx nx 10 x} err
+ set err
+ } {ERR*}
+
+ test "ZADD NX with non existing key - $encoding" {
+ r del ztmp
+ r zadd ztmp nx 10 x 20 y 30 z
+ assert {[r zcard ztmp] == 3}
+ }
+
+ test "ZADD NX only add new elements without updating old ones - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ assert {[r zadd ztmp nx 11 x 21 y 100 a 200 b] == 2}
+ assert {[r zscore ztmp x] == 10}
+ assert {[r zscore ztmp y] == 20}
+ assert {[r zscore ztmp a] == 100}
+ assert {[r zscore ztmp b] == 200}
+ }
+
+ test "ZADD GT and NX are not compatible - $encoding" {
+ r del ztmp
+ catch {r zadd ztmp gt nx 10 x} err
+ set err
+ } {ERR*}
+
+ test "ZADD LT and NX are not compatible - $encoding" {
+ r del ztmp
+ catch {r zadd ztmp lt nx 10 x} err
+ set err
+ } {ERR*}
+
+ test "ZADD LT and GT are not compatible - $encoding" {
+ r del ztmp
+ catch {r zadd ztmp lt gt 10 x} err
+ set err
+ } {ERR*}
+
+ test "ZADD INCR LT/GT replies with nill if score not updated - $encoding" {
+ r del ztmp
+ r zadd ztmp 28 x
+ assert {[r zadd ztmp lt incr 1 x] eq {}}
+ assert {[r zscore ztmp x] == 28}
+ assert {[r zadd ztmp gt incr -1 x] eq {}}
+ assert {[r zscore ztmp x] == 28}
+ }
+
+ test "ZADD INCR LT/GT with inf - $encoding" {
+ r del ztmp
+ r zadd ztmp +inf x -inf y
+
+ assert {[r zadd ztmp lt incr 1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+ assert {[r zadd ztmp gt incr -1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+ assert {[r zadd ztmp lt incr -1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+ assert {[r zadd ztmp gt incr 1 x] eq {}}
+ assert {[r zscore ztmp x] == inf}
+
+ assert {[r zadd ztmp lt incr 1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ assert {[r zadd ztmp gt incr -1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ assert {[r zadd ztmp lt incr -1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ assert {[r zadd ztmp gt incr 1 y] eq {}}
+ assert {[r zscore ztmp y] == -inf}
+ }
+
+ test "ZADD INCR works like ZINCRBY - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ r zadd ztmp INCR 15 x
+ assert {[r zscore ztmp x] == 25}
+ }
+
+ test "ZADD INCR works with a single score-elemenet pair - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ catch {r zadd ztmp INCR 15 x 10 y} err
+ set err
+ } {ERR*}
+
+ test "ZADD CH option changes return value to all changed elements - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x 20 y 30 z
+ assert {[r zadd ztmp 11 x 21 y 30 z] == 0}
+ assert {[r zadd ztmp ch 12 x 22 y 30 z] == 2}
+ }
+
+ test "ZINCRBY calls leading to NaN result in error - $encoding" {
+ r zincrby myzset +inf abc
+ assert_error "*NaN*" {r zincrby myzset -inf abc}
+ }
+
+ test "ZADD - Variadic version base case - $encoding" {
+ r del myzset
+ list [r zadd myzset 10 a 20 b 30 c] [r zrange myzset 0 -1 withscores]
+ } {3 {a 10 b 20 c 30}}
+
+ test "ZADD - Return value is the number of actually added items - $encoding" {
+ list [r zadd myzset 5 x 20 b 30 c] [r zrange myzset 0 -1 withscores]
+ } {1 {x 5 a 10 b 20 c 30}}
+
+ test "ZADD - Variadic version does not add nothing on single parsing err - $encoding" {
+ r del myzset
+ catch {r zadd myzset 10 a 20 b 30.badscore c} e
+ assert_match {*ERR*not*float*} $e
+ r exists myzset
+ } {0}
+
+ test "ZADD - Variadic version will raise error on missing arg - $encoding" {
+ r del myzset
+ catch {r zadd myzset 10 a 20 b 30 c 40} e
+ assert_match {*ERR*syntax*} $e
+ }
+
+ test "ZINCRBY does not work variadic even if shares ZADD implementation - $encoding" {
+ r del myzset
+ catch {r zincrby myzset 10 a 20 b 30 c} e
+ assert_match {*ERR*wrong*number*arg*} $e
+ }
+
+ test "ZCARD basics - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 a 20 b 30 c
+ assert_equal 3 [r zcard ztmp]
+ assert_equal 0 [r zcard zdoesntexist]
+ }
+
+ test "ZREM removes key after last element is removed - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 x
+ r zadd ztmp 20 y
+
+ assert_equal 1 [r exists ztmp]
+ assert_equal 0 [r zrem ztmp z]
+ assert_equal 1 [r zrem ztmp y]
+ assert_equal 1 [r zrem ztmp x]
+ assert_equal 0 [r exists ztmp]
+ }
+
+ test "ZREM variadic version - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 a 20 b 30 c
+ assert_equal 2 [r zrem ztmp x y a b k]
+ assert_equal 0 [r zrem ztmp foo bar]
+ assert_equal 1 [r zrem ztmp c]
+ r exists ztmp
+ } {0}
+
+ test "ZREM variadic version -- remove elements after key deletion - $encoding" {
+ r del ztmp
+ r zadd ztmp 10 a 20 b 30 c
+ r zrem ztmp a b c d e f g
+ } {3}
+
+ test "ZRANGE basics - $encoding" {
+ r del ztmp
+ r zadd ztmp 1 a
+ r zadd ztmp 2 b
+ r zadd ztmp 3 c
+ r zadd ztmp 4 d
+
+ assert_equal {a b c d} [r zrange ztmp 0 -1]
+ assert_equal {a b c} [r zrange ztmp 0 -2]
+ assert_equal {b c d} [r zrange ztmp 1 -1]
+ assert_equal {b c} [r zrange ztmp 1 -2]
+ assert_equal {c d} [r zrange ztmp -2 -1]
+ assert_equal {c} [r zrange ztmp -2 -2]
+
+ # out of range start index
+ assert_equal {a b c} [r zrange ztmp -5 2]
+ assert_equal {a b} [r zrange ztmp -5 1]
+ assert_equal {} [r zrange ztmp 5 -1]
+ assert_equal {} [r zrange ztmp 5 -2]
+
+ # out of range end index
+ assert_equal {a b c d} [r zrange ztmp 0 5]
+ assert_equal {b c d} [r zrange ztmp 1 5]
+ assert_equal {} [r zrange ztmp 0 -5]
+ assert_equal {} [r zrange ztmp 1 -5]
+
+ # withscores
+ assert_equal {a 1 b 2 c 3 d 4} [r zrange ztmp 0 -1 withscores]
+ }
+
+ test "ZREVRANGE basics - $encoding" {
+ r del ztmp
+ r zadd ztmp 1 a
+ r zadd ztmp 2 b
+ r zadd ztmp 3 c
+ r zadd ztmp 4 d
+
+ assert_equal {d c b a} [r zrevrange ztmp 0 -1]
+ assert_equal {d c b} [r zrevrange ztmp 0 -2]
+ assert_equal {c b a} [r zrevrange ztmp 1 -1]
+ assert_equal {c b} [r zrevrange ztmp 1 -2]
+ assert_equal {b a} [r zrevrange ztmp -2 -1]
+ assert_equal {b} [r zrevrange ztmp -2 -2]
+
+ # out of range start index
+ assert_equal {d c b} [r zrevrange ztmp -5 2]
+ assert_equal {d c} [r zrevrange ztmp -5 1]
+ assert_equal {} [r zrevrange ztmp 5 -1]
+ assert_equal {} [r zrevrange ztmp 5 -2]
+
+ # out of range end index
+ assert_equal {d c b a} [r zrevrange ztmp 0 5]
+ assert_equal {c b a} [r zrevrange ztmp 1 5]
+ assert_equal {} [r zrevrange ztmp 0 -5]
+ assert_equal {} [r zrevrange ztmp 1 -5]
+
+ # withscores
+ assert_equal {d 4 c 3 b 2 a 1} [r zrevrange ztmp 0 -1 withscores]
+ }
+
+ test "ZRANK/ZREVRANK basics - $encoding" {
+ set nullres {$-1}
+ if {$::force_resp3} {
+ set nullres {_}
+ }
+ r del zranktmp
+ r zadd zranktmp 10 x
+ r zadd zranktmp 20 y
+ r zadd zranktmp 30 z
+ assert_equal 0 [r zrank zranktmp x]
+ assert_equal 1 [r zrank zranktmp y]
+ assert_equal 2 [r zrank zranktmp z]
+ assert_equal 2 [r zrevrank zranktmp x]
+ assert_equal 1 [r zrevrank zranktmp y]
+ assert_equal 0 [r zrevrank zranktmp z]
+ r readraw 1
+ assert_equal $nullres [r zrank zranktmp foo]
+ assert_equal $nullres [r zrevrank zranktmp foo]
+ r readraw 0
+
+ # withscore
+ set nullres {*-1}
+ if {$::force_resp3} {
+ set nullres {_}
+ }
+ assert_equal {0 10} [r zrank zranktmp x withscore]
+ assert_equal {1 20} [r zrank zranktmp y withscore]
+ assert_equal {2 30} [r zrank zranktmp z withscore]
+ assert_equal {2 10} [r zrevrank zranktmp x withscore]
+ assert_equal {1 20} [r zrevrank zranktmp y withscore]
+ assert_equal {0 30} [r zrevrank zranktmp z withscore]
+ r readraw 1
+ assert_equal $nullres [r zrank zranktmp foo withscore]
+ assert_equal $nullres [r zrevrank zranktmp foo withscore]
+ r readraw 0
+ }
+
+ test "ZRANK - after deletion - $encoding" {
+ r zrem zranktmp y
+ assert_equal 0 [r zrank zranktmp x]
+ assert_equal 1 [r zrank zranktmp z]
+ assert_equal {0 10} [r zrank zranktmp x withscore]
+ assert_equal {1 30} [r zrank zranktmp z withscore]
+ }
+
+ test "ZINCRBY - can create a new sorted set - $encoding" {
+ r del zset
+ r zincrby zset 1 foo
+ assert_equal {foo} [r zrange zset 0 -1]
+ assert_equal 1 [r zscore zset foo]
+ }
+
+ test "ZINCRBY - increment and decrement - $encoding" {
+ r zincrby zset 2 foo
+ r zincrby zset 1 bar
+ assert_equal {bar foo} [r zrange zset 0 -1]
+
+ r zincrby zset 10 bar
+ r zincrby zset -5 foo
+ r zincrby zset -5 bar
+ assert_equal {foo bar} [r zrange zset 0 -1]
+
+ assert_equal -2 [r zscore zset foo]
+ assert_equal 6 [r zscore zset bar]
+ }
+
+ test "ZINCRBY return value - $encoding" {
+ r del ztmp
+ set retval [r zincrby ztmp 1.0 x]
+ assert {$retval == 1.0}
+ }
+
+ proc create_default_zset {} {
+ create_zset zset {-inf a 1 b 2 c 3 d 4 e 5 f +inf g}
+ }
+
+ test "ZRANGEBYSCORE/ZREVRANGEBYSCORE/ZCOUNT basics - $encoding" {
+ create_default_zset
+
+ # inclusive range
+ assert_equal {a b c} [r zrangebyscore zset -inf 2]
+ assert_equal {b c d} [r zrangebyscore zset 0 3]
+ assert_equal {d e f} [r zrangebyscore zset 3 6]
+ assert_equal {e f g} [r zrangebyscore zset 4 +inf]
+ assert_equal {c b a} [r zrevrangebyscore zset 2 -inf]
+ assert_equal {d c b} [r zrevrangebyscore zset 3 0]
+ assert_equal {f e d} [r zrevrangebyscore zset 6 3]
+ assert_equal {g f e} [r zrevrangebyscore zset +inf 4]
+ assert_equal 3 [r zcount zset 0 3]
+
+ # exclusive range
+ assert_equal {b} [r zrangebyscore zset (-inf (2]
+ assert_equal {b c} [r zrangebyscore zset (0 (3]
+ assert_equal {e f} [r zrangebyscore zset (3 (6]
+ assert_equal {f} [r zrangebyscore zset (4 (+inf]
+ assert_equal {b} [r zrevrangebyscore zset (2 (-inf]
+ assert_equal {c b} [r zrevrangebyscore zset (3 (0]
+ assert_equal {f e} [r zrevrangebyscore zset (6 (3]
+ assert_equal {f} [r zrevrangebyscore zset (+inf (4]
+ assert_equal 2 [r zcount zset (0 (3]
+
+ # test empty ranges
+ r zrem zset a
+ r zrem zset g
+
+ # inclusive
+ assert_equal {} [r zrangebyscore zset 4 2]
+ assert_equal {} [r zrangebyscore zset 6 +inf]
+ assert_equal {} [r zrangebyscore zset -inf -6]
+ assert_equal {} [r zrevrangebyscore zset +inf 6]
+ assert_equal {} [r zrevrangebyscore zset -6 -inf]
+
+ # exclusive
+ assert_equal {} [r zrangebyscore zset (4 (2]
+ assert_equal {} [r zrangebyscore zset 2 (2]
+ assert_equal {} [r zrangebyscore zset (2 2]
+ assert_equal {} [r zrangebyscore zset (6 (+inf]
+ assert_equal {} [r zrangebyscore zset (-inf (-6]
+ assert_equal {} [r zrevrangebyscore zset (+inf (6]
+ assert_equal {} [r zrevrangebyscore zset (-6 (-inf]
+
+ # empty inner range
+ assert_equal {} [r zrangebyscore zset 2.4 2.6]
+ assert_equal {} [r zrangebyscore zset (2.4 2.6]
+ assert_equal {} [r zrangebyscore zset 2.4 (2.6]
+ assert_equal {} [r zrangebyscore zset (2.4 (2.6]
+ }
+
+ test "ZRANGEBYSCORE with WITHSCORES - $encoding" {
+ create_default_zset
+ assert_equal {b 1 c 2 d 3} [r zrangebyscore zset 0 3 withscores]
+ assert_equal {d 3 c 2 b 1} [r zrevrangebyscore zset 3 0 withscores]
+ }
+
+ test "ZRANGEBYSCORE with LIMIT - $encoding" {
+ create_default_zset
+ assert_equal {b c} [r zrangebyscore zset 0 10 LIMIT 0 2]
+ assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 3]
+ assert_equal {d e f} [r zrangebyscore zset 0 10 LIMIT 2 10]
+ assert_equal {} [r zrangebyscore zset 0 10 LIMIT 20 10]
+ assert_equal {f e} [r zrevrangebyscore zset 10 0 LIMIT 0 2]
+ assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 3]
+ assert_equal {d c b} [r zrevrangebyscore zset 10 0 LIMIT 2 10]
+ assert_equal {} [r zrevrangebyscore zset 10 0 LIMIT 20 10]
+ }
+
+ test "ZRANGEBYSCORE with LIMIT and WITHSCORES - $encoding" {
+ create_default_zset
+ assert_equal {e 4 f 5} [r zrangebyscore zset 2 5 LIMIT 2 3 WITHSCORES]
+ assert_equal {d 3 c 2} [r zrevrangebyscore zset 5 2 LIMIT 2 3 WITHSCORES]
+ assert_equal {} [r zrangebyscore zset 2 5 LIMIT 12 13 WITHSCORES]
+ }
+
+ test "ZRANGEBYSCORE with non-value min or max - $encoding" {
+ assert_error "*not*float*" {r zrangebyscore fooz str 1}
+ assert_error "*not*float*" {r zrangebyscore fooz 1 str}
+ assert_error "*not*float*" {r zrangebyscore fooz 1 NaN}
+ }
+
+ proc create_default_lex_zset {} {
+ create_zset zset {0 alpha 0 bar 0 cool 0 down
+ 0 elephant 0 foo 0 great 0 hill
+ 0 omega}
+ }
+
+ test "ZRANGEBYLEX/ZREVRANGEBYLEX/ZLEXCOUNT basics - $encoding" {
+ create_default_lex_zset
+
+ # inclusive range
+ assert_equal {alpha bar cool} [r zrangebylex zset - \[cool]
+ assert_equal {bar cool down} [r zrangebylex zset \[bar \[down]
+ assert_equal {great hill omega} [r zrangebylex zset \[g +]
+ assert_equal {cool bar alpha} [r zrevrangebylex zset \[cool -]
+ assert_equal {down cool bar} [r zrevrangebylex zset \[down \[bar]
+ assert_equal {omega hill great foo elephant down} [r zrevrangebylex zset + \[d]
+ assert_equal 3 [r zlexcount zset \[ele \[h]
+
+ # exclusive range
+ assert_equal {alpha bar} [r zrangebylex zset - (cool]
+ assert_equal {cool} [r zrangebylex zset (bar (down]
+ assert_equal {hill omega} [r zrangebylex zset (great +]
+ assert_equal {bar alpha} [r zrevrangebylex zset (cool -]
+ assert_equal {cool} [r zrevrangebylex zset (down (bar]
+ assert_equal {omega hill} [r zrevrangebylex zset + (great]
+ assert_equal 2 [r zlexcount zset (ele (great]
+
+ # empty ranges (inclusive and exclusive)
+ assert_equal {} [r zrangebylex zset (az (b]
+ assert_equal {} [r zrangebylex zset (z +]
+ assert_equal {} [r zrangebylex zset - \[aaaa]
+ assert_equal {} [r zrevrangebylex zset \[elez \[elex]
+ assert_equal {} [r zrevrangebylex zset (hill (omega]
+ }
+
+ test "ZLEXCOUNT advanced - $encoding" {
+ create_default_lex_zset
+
+ assert_equal 9 [r zlexcount zset - +]
+ assert_equal 0 [r zlexcount zset + -]
+ assert_equal 0 [r zlexcount zset + \[c]
+ assert_equal 0 [r zlexcount zset \[c -]
+ assert_equal 8 [r zlexcount zset \[bar +]
+ assert_equal 5 [r zlexcount zset \[bar \[foo]
+ assert_equal 4 [r zlexcount zset \[bar (foo]
+ assert_equal 4 [r zlexcount zset (bar \[foo]
+ assert_equal 3 [r zlexcount zset (bar (foo]
+ assert_equal 5 [r zlexcount zset - (foo]
+ assert_equal 1 [r zlexcount zset (maxstring +]
+ }
+
+ test "ZRANGEBYSLEX with LIMIT - $encoding" {
+ create_default_lex_zset
+ assert_equal {alpha bar} [r zrangebylex zset - \[cool LIMIT 0 2]
+ assert_equal {bar cool} [r zrangebylex zset - \[cool LIMIT 1 2]
+ assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 0 0]
+ assert_equal {} [r zrangebylex zset \[bar \[down LIMIT 2 0]
+ assert_equal {bar} [r zrangebylex zset \[bar \[down LIMIT 0 1]
+ assert_equal {cool} [r zrangebylex zset \[bar \[down LIMIT 1 1]
+ assert_equal {bar cool down} [r zrangebylex zset \[bar \[down LIMIT 0 100]
+ assert_equal {omega hill great foo elephant} [r zrevrangebylex zset + \[d LIMIT 0 5]
+ assert_equal {omega hill great foo} [r zrevrangebylex zset + \[d LIMIT 0 4]
+ }
+
+ test "ZRANGEBYLEX with invalid lex range specifiers - $encoding" {
+ assert_error "*not*string*" {r zrangebylex fooz foo bar}
+ assert_error "*not*string*" {r zrangebylex fooz \[foo bar}
+ assert_error "*not*string*" {r zrangebylex fooz foo \[bar}
+ assert_error "*not*string*" {r zrangebylex fooz +x \[bar}
+ assert_error "*not*string*" {r zrangebylex fooz -x \[bar}
+ }
+
+ test "ZREMRANGEBYSCORE basics - $encoding" {
+ proc remrangebyscore {min max} {
+ create_zset zset {1 a 2 b 3 c 4 d 5 e}
+ assert_equal 1 [r exists zset]
+ r zremrangebyscore zset $min $max
+ }
+
+ # inner range
+ assert_equal 3 [remrangebyscore 2 4]
+ assert_equal {a e} [r zrange zset 0 -1]
+
+ # start underflow
+ assert_equal 1 [remrangebyscore -10 1]
+ assert_equal {b c d e} [r zrange zset 0 -1]
+
+ # end overflow
+ assert_equal 1 [remrangebyscore 5 10]
+ assert_equal {a b c d} [r zrange zset 0 -1]
+
+ # switch min and max
+ assert_equal 0 [remrangebyscore 4 2]
+ assert_equal {a b c d e} [r zrange zset 0 -1]
+
+ # -inf to mid
+ assert_equal 3 [remrangebyscore -inf 3]
+ assert_equal {d e} [r zrange zset 0 -1]
+
+ # mid to +inf
+ assert_equal 3 [remrangebyscore 3 +inf]
+ assert_equal {a b} [r zrange zset 0 -1]
+
+ # -inf to +inf
+ assert_equal 5 [remrangebyscore -inf +inf]
+ assert_equal {} [r zrange zset 0 -1]
+
+ # exclusive min
+ assert_equal 4 [remrangebyscore (1 5]
+ assert_equal {a} [r zrange zset 0 -1]
+ assert_equal 3 [remrangebyscore (2 5]
+ assert_equal {a b} [r zrange zset 0 -1]
+
+ # exclusive max
+ assert_equal 4 [remrangebyscore 1 (5]
+ assert_equal {e} [r zrange zset 0 -1]
+ assert_equal 3 [remrangebyscore 1 (4]
+ assert_equal {d e} [r zrange zset 0 -1]
+
+ # exclusive min and max
+ assert_equal 3 [remrangebyscore (1 (5]
+ assert_equal {a e} [r zrange zset 0 -1]
+
+ # destroy when empty
+ assert_equal 5 [remrangebyscore 1 5]
+ assert_equal 0 [r exists zset]
+ }
+
+ test "ZREMRANGEBYSCORE with non-value min or max - $encoding" {
+ assert_error "*not*float*" {r zremrangebyscore fooz str 1}
+ assert_error "*not*float*" {r zremrangebyscore fooz 1 str}
+ assert_error "*not*float*" {r zremrangebyscore fooz 1 NaN}
+ }
+
+ test "ZREMRANGEBYRANK basics - $encoding" {
+ proc remrangebyrank {min max} {
+ create_zset zset {1 a 2 b 3 c 4 d 5 e}
+ assert_equal 1 [r exists zset]
+ r zremrangebyrank zset $min $max
+ }
+
+ # inner range
+ assert_equal 3 [remrangebyrank 1 3]
+ assert_equal {a e} [r zrange zset 0 -1]
+
+ # start underflow
+ assert_equal 1 [remrangebyrank -10 0]
+ assert_equal {b c d e} [r zrange zset 0 -1]
+
+ # start overflow
+ assert_equal 0 [remrangebyrank 10 -1]
+ assert_equal {a b c d e} [r zrange zset 0 -1]
+
+ # end underflow
+ assert_equal 0 [remrangebyrank 0 -10]
+ assert_equal {a b c d e} [r zrange zset 0 -1]
+
+ # end overflow
+ assert_equal 5 [remrangebyrank 0 10]
+ assert_equal {} [r zrange zset 0 -1]
+
+ # destroy when empty
+ assert_equal 5 [remrangebyrank 0 4]
+ assert_equal 0 [r exists zset]
+ }
+
+ test "ZREMRANGEBYLEX basics - $encoding" {
+ proc remrangebylex {min max} {
+ create_default_lex_zset
+ assert_equal 1 [r exists zset]
+ r zremrangebylex zset $min $max
+ }
+
+ # inclusive range
+ assert_equal 3 [remrangebylex - \[cool]
+ assert_equal {down elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 3 [remrangebylex \[bar \[down]
+ assert_equal {alpha elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 3 [remrangebylex \[g +]
+ assert_equal {alpha bar cool down elephant foo} [r zrange zset 0 -1]
+ assert_equal 6 [r zcard zset]
+
+ # exclusive range
+ assert_equal 2 [remrangebylex - (cool]
+ assert_equal {cool down elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 1 [remrangebylex (bar (down]
+ assert_equal {alpha bar down elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 2 [remrangebylex (great +]
+ assert_equal {alpha bar cool down elephant foo great} [r zrange zset 0 -1]
+ assert_equal 7 [r zcard zset]
+
+ # no-op ranges (inclusive and exclusive)
+ assert_equal 0 [remrangebylex (az (b]
+ assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 0 [remrangebylex (z +]
+ assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 0 [remrangebylex - \[aaaa]
+ assert_equal {alpha bar cool down elephant foo great hill omega} [r zrange zset 0 -1]
+ assert_equal 9 [r zcard zset]
+
+ # destroy when empty
+ assert_equal 9 [remrangebylex - +]
+ assert_equal 0 [r zcard zset]
+ assert_equal 0 [r exists zset]
+ }
+
+ test "ZUNIONSTORE against non-existing key doesn't set destination - $encoding" {
+ r del zseta{t}
+ assert_equal 0 [r zunionstore dst_key{t} 1 zseta{t}]
+ assert_equal 0 [r exists dst_key{t}]
+ }
+
+ test "ZUNION/ZINTER/ZINTERCARD/ZDIFF against non-existing key - $encoding" {
+ r del zseta
+ assert_equal {} [r zunion 1 zseta]
+ assert_equal {} [r zinter 1 zseta]
+ assert_equal 0 [r zintercard 1 zseta]
+ assert_equal 0 [r zintercard 1 zseta limit 0]
+ assert_equal {} [r zdiff 1 zseta]
+ }
+
+ test "ZUNIONSTORE with empty set - $encoding" {
+ r del zseta{t} zsetb{t}
+ r zadd zseta{t} 1 a
+ r zadd zseta{t} 2 b
+ r zunionstore zsetc{t} 2 zseta{t} zsetb{t}
+ r zrange zsetc{t} 0 -1 withscores
+ } {a 1 b 2}
+
+ test "ZUNION/ZINTER/ZINTERCARD/ZDIFF with empty set - $encoding" {
+ r del zseta{t} zsetb{t}
+ r zadd zseta{t} 1 a
+ r zadd zseta{t} 2 b
+ assert_equal {a 1 b 2} [r zunion 2 zseta{t} zsetb{t} withscores]
+ assert_equal {} [r zinter 2 zseta{t} zsetb{t} withscores]
+ assert_equal 0 [r zintercard 2 zseta{t} zsetb{t}]
+ assert_equal 0 [r zintercard 2 zseta{t} zsetb{t} limit 0]
+ assert_equal {a 1 b 2} [r zdiff 2 zseta{t} zsetb{t} withscores]
+ }
+
+ test "ZUNIONSTORE basics - $encoding" {
+ r del zseta{t} zsetb{t} zsetc{t}
+ r zadd zseta{t} 1 a
+ r zadd zseta{t} 2 b
+ r zadd zseta{t} 3 c
+ r zadd zsetb{t} 1 b
+ r zadd zsetb{t} 2 c
+ r zadd zsetb{t} 3 d
+
+ assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t}]
+ assert_equal {a 1 b 3 d 3 c 5} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZUNION/ZINTER/ZINTERCARD/ZDIFF with integer members - $encoding" {
+ r del zsetd{t} zsetf{t}
+ r zadd zsetd{t} 1 1
+ r zadd zsetd{t} 2 2
+ r zadd zsetd{t} 3 3
+ r zadd zsetf{t} 1 1
+ r zadd zsetf{t} 3 3
+ r zadd zsetf{t} 4 4
+
+ assert_equal {1 2 2 2 4 4 3 6} [r zunion 2 zsetd{t} zsetf{t} withscores]
+ assert_equal {1 2 3 6} [r zinter 2 zsetd{t} zsetf{t} withscores]
+ assert_equal 2 [r zintercard 2 zsetd{t} zsetf{t}]
+ assert_equal 2 [r zintercard 2 zsetd{t} zsetf{t} limit 0]
+ assert_equal {2 2} [r zdiff 2 zsetd{t} zsetf{t} withscores]
+ }
+
+ test "ZUNIONSTORE with weights - $encoding" {
+ assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} weights 2 3]
+ assert_equal {a 2 b 7 d 9 c 12} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZUNION with weights - $encoding" {
+ assert_equal {a 2 b 7 d 9 c 12} [r zunion 2 zseta{t} zsetb{t} weights 2 3 withscores]
+ assert_equal {b 7 c 12} [r zinter 2 zseta{t} zsetb{t} weights 2 3 withscores]
+ }
+
+ test "ZUNIONSTORE with a regular set and weights - $encoding" {
+ r del seta{t}
+ r sadd seta{t} a
+ r sadd seta{t} b
+ r sadd seta{t} c
+
+ assert_equal 4 [r zunionstore zsetc{t} 2 seta{t} zsetb{t} weights 2 3]
+ assert_equal {a 2 b 5 c 8 d 9} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZUNIONSTORE with AGGREGATE MIN - $encoding" {
+ assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} aggregate min]
+ assert_equal {a 1 b 1 c 2 d 3} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZUNION/ZINTER with AGGREGATE MIN - $encoding" {
+ assert_equal {a 1 b 1 c 2 d 3} [r zunion 2 zseta{t} zsetb{t} aggregate min withscores]
+ assert_equal {b 1 c 2} [r zinter 2 zseta{t} zsetb{t} aggregate min withscores]
+ }
+
+ test "ZUNIONSTORE with AGGREGATE MAX - $encoding" {
+ assert_equal 4 [r zunionstore zsetc{t} 2 zseta{t} zsetb{t} aggregate max]
+ assert_equal {a 1 b 2 c 3 d 3} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZUNION/ZINTER with AGGREGATE MAX - $encoding" {
+ assert_equal {a 1 b 2 c 3 d 3} [r zunion 2 zseta{t} zsetb{t} aggregate max withscores]
+ assert_equal {b 2 c 3} [r zinter 2 zseta{t} zsetb{t} aggregate max withscores]
+ }
+
+ test "ZINTERSTORE basics - $encoding" {
+ assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t}]
+ assert_equal {b 3 c 5} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZINTER basics - $encoding" {
+ assert_equal {b 3 c 5} [r zinter 2 zseta{t} zsetb{t} withscores]
+ }
+
+ test "ZINTERCARD with illegal arguments" {
+ assert_error "ERR syntax error*" {r zintercard 1 zseta{t} zseta{t}}
+ assert_error "ERR syntax error*" {r zintercard 1 zseta{t} bar_arg}
+ assert_error "ERR syntax error*" {r zintercard 1 zseta{t} LIMIT}
+
+ assert_error "ERR LIMIT*" {r zintercard 1 myset{t} LIMIT -1}
+ assert_error "ERR LIMIT*" {r zintercard 1 myset{t} LIMIT a}
+ }
+
+ test "ZINTERCARD basics - $encoding" {
+ assert_equal 2 [r zintercard 2 zseta{t} zsetb{t}]
+ assert_equal 2 [r zintercard 2 zseta{t} zsetb{t} limit 0]
+ assert_equal 1 [r zintercard 2 zseta{t} zsetb{t} limit 1]
+ assert_equal 2 [r zintercard 2 zseta{t} zsetb{t} limit 10]
+ }
+
+ test "ZINTER RESP3 - $encoding" {
+ r hello 3
+ assert_equal {{b 3.0} {c 5.0}} [r zinter 2 zseta{t} zsetb{t} withscores]
+ r hello 2
+ }
+
+ test "ZINTERSTORE with weights - $encoding" {
+ assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} weights 2 3]
+ assert_equal {b 7 c 12} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZINTER with weights - $encoding" {
+ assert_equal {b 7 c 12} [r zinter 2 zseta{t} zsetb{t} weights 2 3 withscores]
+ }
+
+ test "ZINTERSTORE with a regular set and weights - $encoding" {
+ r del seta{t}
+ r sadd seta{t} a
+ r sadd seta{t} b
+ r sadd seta{t} c
+ assert_equal 2 [r zinterstore zsetc{t} 2 seta{t} zsetb{t} weights 2 3]
+ assert_equal {b 5 c 8} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZINTERSTORE with AGGREGATE MIN - $encoding" {
+ assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} aggregate min]
+ assert_equal {b 1 c 2} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZINTERSTORE with AGGREGATE MAX - $encoding" {
+ assert_equal 2 [r zinterstore zsetc{t} 2 zseta{t} zsetb{t} aggregate max]
+ assert_equal {b 2 c 3} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ foreach cmd {ZUNIONSTORE ZINTERSTORE} {
+ test "$cmd with +inf/-inf scores - $encoding" {
+ r del zsetinf1{t} zsetinf2{t}
+
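+            # Redis aggregates (+inf) + (-inf) to 0 rather than NaN, so mixing
+            # opposite infinities across the inputs must yield a score of 0.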
+ r zadd zsetinf1{t} +inf key
+ r zadd zsetinf2{t} +inf key
+ r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t}
+ assert_equal inf [r zscore zsetinf3{t} key]
+
+ r zadd zsetinf1{t} -inf key
+ r zadd zsetinf2{t} +inf key
+ r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t}
+ assert_equal 0 [r zscore zsetinf3{t} key]
+
+ r zadd zsetinf1{t} +inf key
+ r zadd zsetinf2{t} -inf key
+ r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t}
+ assert_equal 0 [r zscore zsetinf3{t} key]
+
+ r zadd zsetinf1{t} -inf key
+ r zadd zsetinf2{t} -inf key
+ r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t}
+ assert_equal -inf [r zscore zsetinf3{t} key]
+ }
+
+ test "$cmd with NaN weights - $encoding" {
+ r del zsetinf1{t} zsetinf2{t}
+
+ r zadd zsetinf1{t} 1.0 key
+ r zadd zsetinf2{t} 1.0 key
+ assert_error "*weight*not*float*" {
+ r $cmd zsetinf3{t} 2 zsetinf1{t} zsetinf2{t} weights nan nan
+ }
+ }
+ }
+
+ test "ZDIFFSTORE basics - $encoding" {
+ assert_equal 1 [r zdiffstore zsetc{t} 2 zseta{t} zsetb{t}]
+ assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZDIFF basics - $encoding" {
+ assert_equal {a 1} [r zdiff 2 zseta{t} zsetb{t} withscores]
+ }
+
+ test "ZDIFFSTORE with a regular set - $encoding" {
+ r del seta{t}
+ r sadd seta{t} a
+ r sadd seta{t} b
+ r sadd seta{t} c
+ assert_equal 1 [r zdiffstore zsetc{t} 2 seta{t} zsetb{t}]
+ assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZDIFF subtracting set from itself - $encoding" {
+ assert_equal 0 [r zdiffstore zsetc{t} 2 zseta{t} zseta{t}]
+ assert_equal {} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZDIFF algorithm 1 - $encoding" {
+ r del zseta{t} zsetb{t} zsetc{t}
+ r zadd zseta{t} 1 a
+ r zadd zseta{t} 2 b
+ r zadd zseta{t} 3 c
+ r zadd zsetb{t} 1 b
+ r zadd zsetb{t} 2 c
+ r zadd zsetb{t} 3 d
+ assert_equal 1 [r zdiffstore zsetc{t} 2 zseta{t} zsetb{t}]
+ assert_equal {a 1} [r zrange zsetc{t} 0 -1 withscores]
+ }
+
+ test "ZDIFF algorithm 2 - $encoding" {
+ r del zseta{t} zsetb{t} zsetc{t} zsetd{t} zsete{t}
+ r zadd zseta{t} 1 a
+ r zadd zseta{t} 2 b
+ r zadd zseta{t} 3 c
+ r zadd zseta{t} 5 e
+ r zadd zsetb{t} 1 b
+ r zadd zsetc{t} 1 c
+ r zadd zsetd{t} 1 d
+ assert_equal 2 [r zdiffstore zsete{t} 4 zseta{t} zsetb{t} zsetc{t} zsetd{t}]
+ assert_equal {a 1 e 5} [r zrange zsete{t} 0 -1 withscores]
+ }
+
+ test "ZDIFF fuzzing - $encoding" {
+ for {set j 0} {$j < 100} {incr j} {
+ unset -nocomplain s
+ array set s {}
+ set args {}
+ set num_sets [expr {[randomInt 10]+1}]
+ for {set i 0} {$i < $num_sets} {incr i} {
+ set num_elements [randomInt 100]
+ r del zset_$i{t}
+ lappend args zset_$i{t}
+ while {$num_elements} {
+ set ele [randomValue]
+ r zadd zset_$i{t} [randomInt 100] $ele
+ if {$i == 0} {
+ set s($ele) x
+ } else {
+ unset -nocomplain s($ele)
+ }
+ incr num_elements -1
+ }
+ }
+ set result [lsort [r zdiff [llength $args] {*}$args]]
+ assert_equal $result [lsort [array names s]]
+ }
+ }
+
+ foreach {pop} {ZPOPMIN ZPOPMAX} {
+ test "$pop with the count 0 returns an empty array" {
+ r del zset
+ r zadd zset 1 a 2 b 3 c
+ assert_equal {} [r $pop zset 0]
+
+ # Make sure we can distinguish between an empty array and a null response
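+            # ("*0" is the raw encoding of an empty array; a null array would
+            # read as "*-1" in RESP2 or "_" in RESP3.)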
+ r readraw 1
+ assert_equal {*0} [r $pop zset 0]
+ r readraw 0
+
+ assert_equal 3 [r zcard zset]
+ }
+
+ test "$pop with negative count" {
+ r set zset foo
+ assert_error "ERR *must be positive" {r $pop zset -1}
+
+ r del zset
+ assert_error "ERR *must be positive" {r $pop zset -2}
+
+ r zadd zset 1 a 2 b 3 c
+ assert_error "ERR *must be positive" {r $pop zset -3}
+ }
+ }
+
+ foreach {popmin popmax} {ZPOPMIN ZPOPMAX ZMPOP_MIN ZMPOP_MAX} {
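+        # verify_zpop_response (a helper presumably defined earlier in this
+        # suite) runs either the plain ZPOP* command or the equivalent ZMPOP,
+        # and checks the matching reply shape: a flat {member score} list for
+        # ZPOPMIN/ZPOPMAX versus {key {{member score} ...}} for ZMPOP.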
+ test "Basic $popmin/$popmax with a single key - $encoding" {
+ r del zset
+ verify_zpop_response r $popmin zset 0 {} {}
+
+ create_zset zset {-1 a 1 b 2 c 3 d 4 e}
+ verify_zpop_response r $popmin zset 0 {a -1} {zset {{a -1}}}
+ verify_zpop_response r $popmin zset 0 {b 1} {zset {{b 1}}}
+ verify_zpop_response r $popmax zset 0 {e 4} {zset {{e 4}}}
+ verify_zpop_response r $popmax zset 0 {d 3} {zset {{d 3}}}
+ verify_zpop_response r $popmin zset 0 {c 2} {zset {{c 2}}}
+ assert_equal 0 [r exists zset]
+ }
+
+ test "$popmin/$popmax with count - $encoding" {
+ r del z1
+ verify_zpop_response r $popmin z1 2 {} {}
+
+ create_zset z1 {0 a 1 b 2 c 3 d}
+ verify_zpop_response r $popmin z1 2 {a 0 b 1} {z1 {{a 0} {b 1}}}
+ verify_zpop_response r $popmax z1 2 {d 3 c 2} {z1 {{d 3} {c 2}}}
+ }
+ }
+
+ foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} {
+ test "$popmin/$popmax with a single existing sorted set - $encoding" {
+ set rd [redis_deferring_client]
+ create_zset zset {0 a 1 b 2 c 3 d}
+
+ verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}}
+ verify_bzpop_response $rd $popmax zset 5 0 {zset d 3} {zset {{d 3}}}
+ verify_bzpop_response $rd $popmin zset 5 0 {zset b 1} {zset {{b 1}}}
+ verify_bzpop_response $rd $popmax zset 5 0 {zset c 2} {zset {{c 2}}}
+ assert_equal 0 [r exists zset]
+ $rd close
+ }
+
+ test "$popmin/$popmax with multiple existing sorted sets - $encoding" {
+ set rd [redis_deferring_client]
+ create_zset z1{t} {0 a 1 b 2 c}
+ create_zset z2{t} {3 d 4 e 5 f}
+
+ verify_bzpop_two_key_response $rd $popmin z1{t} z2{t} 5 0 {z1{t} a 0} {z1{t} {{a 0}}}
+ verify_bzpop_two_key_response $rd $popmax z1{t} z2{t} 5 0 {z1{t} c 2} {z1{t} {{c 2}}}
+ assert_equal 1 [r zcard z1{t}]
+ assert_equal 3 [r zcard z2{t}]
+
+ verify_bzpop_two_key_response $rd $popmax z2{t} z1{t} 5 0 {z2{t} f 5} {z2{t} {{f 5}}}
+ verify_bzpop_two_key_response $rd $popmin z2{t} z1{t} 5 0 {z2{t} d 3} {z2{t} {{d 3}}}
+ assert_equal 1 [r zcard z1{t}]
+ assert_equal 1 [r zcard z2{t}]
+ $rd close
+ }
+
+ test "$popmin/$popmax second sorted set has members - $encoding" {
+ set rd [redis_deferring_client]
+ r del z1{t}
+ create_zset z2{t} {3 d 4 e 5 f}
+
+ verify_bzpop_two_key_response $rd $popmax z1{t} z2{t} 5 0 {z2{t} f 5} {z2{t} {{f 5}}}
+ verify_bzpop_two_key_response $rd $popmin z1{t} z2{t} 5 0 {z2{t} d 3} {z2{t} {{d 3}}}
+ assert_equal 0 [r zcard z1{t}]
+ assert_equal 1 [r zcard z2{t}]
+ $rd close
+ }
+ }
+
+ foreach {popmin popmax} {ZPOPMIN ZPOPMAX ZMPOP_MIN ZMPOP_MAX} {
+ test "Basic $popmin/$popmax - $encoding RESP3" {
+ r hello 3
+ create_zset z1 {0 a 1 b 2 c 3 d}
+ verify_zpop_response r $popmin z1 0 {a 0.0} {z1 {{a 0.0}}}
+ verify_zpop_response r $popmax z1 0 {d 3.0} {z1 {{d 3.0}}}
+ r hello 2
+ }
+
+ test "$popmin/$popmax with count - $encoding RESP3" {
+ r hello 3
+ create_zset z1 {0 a 1 b 2 c 3 d}
+ verify_zpop_response r $popmin z1 2 {{a 0.0} {b 1.0}} {z1 {{a 0.0} {b 1.0}}}
+ verify_zpop_response r $popmax z1 2 {{d 3.0} {c 2.0}} {z1 {{d 3.0} {c 2.0}}}
+ r hello 2
+ }
+ }
+
+ foreach {popmin popmax} {BZPOPMIN BZPOPMAX BZMPOP_MIN BZMPOP_MAX} {
+ test "$popmin/$popmax - $encoding RESP3" {
+ r hello 3
+ set rd [redis_deferring_client]
+ create_zset zset {0 a 1 b 2 c 3 d}
+
+ verify_bzpop_response $rd $popmin zset 5 0 {zset a 0} {zset {{a 0}}}
+ verify_bzpop_response $rd $popmax zset 5 0 {zset d 3} {zset {{d 3}}}
+ verify_bzpop_response $rd $popmin zset 5 0 {zset b 1} {zset {{b 1}}}
+ verify_bzpop_response $rd $popmax zset 5 0 {zset c 2} {zset {{c 2}}}
+
+ assert_equal 0 [r exists zset]
+ r hello 2
+ $rd close
+ }
+ }
+
+ r config set zset-max-ziplist-entries $original_max_entries
+ r config set zset-max-ziplist-value $original_max_value
+ }
+
+ basics listpack
+ basics skiplist
+
+ test "ZPOP/ZMPOP against wrong type" {
+ r set foo{t} bar
+ assert_error "*WRONGTYPE*" {r zpopmin foo{t}}
+ assert_error "*WRONGTYPE*" {r zpopmin foo{t} 0}
+ assert_error "*WRONGTYPE*" {r zpopmax foo{t}}
+ assert_error "*WRONGTYPE*" {r zpopmax foo{t} 0}
+ assert_error "*WRONGTYPE*" {r zpopmin foo{t} 2}
+
+ assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} min}
+ assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} max}
+ assert_error "*WRONGTYPE*" {r zmpop 1 foo{t} max count 200}
+
+ r del foo{t}
+ r set foo2{t} bar
+ assert_error "*WRONGTYPE*" {r zmpop 2 foo{t} foo2{t} min}
+ assert_error "*WRONGTYPE*" {r zmpop 2 foo2{t} foo1{t} max count 1}
+ }
+
+ test "ZMPOP with illegal argument" {
+ assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop}
+ assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop 1}
+ assert_error "ERR wrong number of arguments for 'zmpop' command" {r zmpop 1 myzset{t}}
+
+ assert_error "ERR numkeys*" {r zmpop 0 myzset{t} MIN}
+ assert_error "ERR numkeys*" {r zmpop a myzset{t} MIN}
+ assert_error "ERR numkeys*" {r zmpop -1 myzset{t} MAX}
+
+ assert_error "ERR syntax error*" {r zmpop 1 myzset{t} bad_where}
+ assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MIN bar_arg}
+ assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MAX MIN}
+ assert_error "ERR syntax error*" {r zmpop 1 myzset{t} COUNT}
+ assert_error "ERR syntax error*" {r zmpop 1 myzset{t} MAX COUNT 1 COUNT 2}
+ assert_error "ERR syntax error*" {r zmpop 2 myzset{t} myzset2{t} bad_arg}
+
+ assert_error "ERR count*" {r zmpop 1 myzset{t} MIN COUNT 0}
+ assert_error "ERR count*" {r zmpop 1 myzset{t} MAX COUNT a}
+ assert_error "ERR count*" {r zmpop 1 myzset{t} MIN COUNT -1}
+ assert_error "ERR count*" {r zmpop 2 myzset{t} myzset2{t} MAX COUNT -1}
+ }
+
+ test "ZMPOP propagate as pop with count command to replica" {
+ set repl [attach_to_replication_stream]
+
+ # ZMPOP min/max propagate as ZPOPMIN/ZPOPMAX with count
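+        # For example, the two-key "min count 10" call below pops the single
+        # remaining element of myzset, so it reaches the replica as
+        # {zpopmin myzset{t} 1} (see the stream asserts at the end).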
+ r zadd myzset{t} 1 one 2 two 3 three
+
+ # Pop elements from one zset.
+ r zmpop 1 myzset{t} min
+ r zmpop 1 myzset{t} max count 1
+
+        # Now the zset has only one element left.
+ r zmpop 2 myzset{t} myzset2{t} min count 10
+
+        # No elements to pop, so nothing is propagated.
+ r zmpop 2 myzset{t} myzset2{t} max count 10
+
+ # Pop elements from the second zset.
+ r zadd myzset2{t} 1 one 2 two 3 three
+ r zmpop 2 myzset{t} myzset2{t} min count 2
+ r zmpop 2 myzset{t} myzset2{t} max count 1
+
+ # Pop all elements.
+ r zadd myzset{t} 1 one 2 two 3 three
+ r zadd myzset2{t} 4 four 5 five 6 six
+ r zmpop 2 myzset{t} myzset2{t} min count 10
+ r zmpop 2 myzset{t} myzset2{t} max count 10
+
+ assert_replication_stream $repl {
+ {select *}
+ {zadd myzset{t} 1 one 2 two 3 three}
+ {zpopmin myzset{t} 1}
+ {zpopmax myzset{t} 1}
+ {zpopmin myzset{t} 1}
+ {zadd myzset2{t} 1 one 2 two 3 three}
+ {zpopmin myzset2{t} 2}
+ {zpopmax myzset2{t} 1}
+ {zadd myzset{t} 1 one 2 two 3 three}
+ {zadd myzset2{t} 4 four 5 five 6 six}
+ {zpopmin myzset{t} 3}
+ {zpopmax myzset2{t} 3}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ foreach resp {3 2} {
+ set rd [redis_deferring_client]
+
+ if {[lsearch $::denytags "resp3"] >= 0} {
+ if {$resp == 3} {continue}
+ } elseif {$::force_resp3} {
+ if {$resp == 2} {continue}
+ }
+ r hello $resp
+ $rd hello $resp
+ $rd read
+
+ test "ZPOPMIN/ZPOPMAX readraw in RESP$resp" {
+ r del zset{t}
+ create_zset zset2{t} {1 a 2 b 3 c 4 d 5 e}
+
+ r readraw 1
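+            # In raw mode each protocol frame is read separately: "*N" is an
+            # array header, "$N" a bulk-string length (payload on the next
+            # read), and in RESP3 scores arrive as double frames (",<score>"),
+            # which verify_score_response checks per protocol version.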
+
+ # ZPOP against non existing key.
+ assert_equal {*0} [r zpopmin zset{t}]
+ assert_equal {*0} [r zpopmin zset{t} 1]
+
+ # ZPOP without COUNT option.
+ assert_equal {*2} [r zpopmin zset2{t}]
+ assert_equal [r read] {$1}
+ assert_equal [r read] {a}
+ verify_score_response r $resp 1
+
+ # ZPOP with COUNT option.
+ if {$resp == 2} {
+ assert_equal {*2} [r zpopmax zset2{t} 1]
+ assert_equal [r read] {$1}
+ assert_equal [r read] {e}
+ } elseif {$resp == 3} {
+ assert_equal {*1} [r zpopmax zset2{t} 1]
+ assert_equal [r read] {*2}
+ assert_equal [r read] {$1}
+ assert_equal [r read] {e}
+ }
+ verify_score_response r $resp 5
+
+ r readraw 0
+ }
+
+ test "BZPOPMIN/BZPOPMAX readraw in RESP$resp" {
+ r del zset{t}
+ create_zset zset2{t} {1 a 2 b 3 c 4 d 5 e}
+
+ $rd readraw 1
+
+ # BZPOP released on timeout.
+ $rd bzpopmin zset{t} 0.01
+ verify_nil_response $resp [$rd read]
+ $rd bzpopmax zset{t} 0.01
+ verify_nil_response $resp [$rd read]
+
+ # BZPOP non-blocking path.
+ $rd bzpopmin zset1{t} zset2{t} 0.1
+ assert_equal [$rd read] {*3}
+ assert_equal [$rd read] {$8}
+ assert_equal [$rd read] {zset2{t}}
+ assert_equal [$rd read] {$1}
+ assert_equal [$rd read] {a}
+ verify_score_response $rd $resp 1
+
+ # BZPOP blocking path.
+ $rd bzpopmin zset{t} 5
+ wait_for_blocked_client
+ r zadd zset{t} 1 a
+ assert_equal [$rd read] {*3}
+ assert_equal [$rd read] {$7}
+ assert_equal [$rd read] {zset{t}}
+ assert_equal [$rd read] {$1}
+ assert_equal [$rd read] {a}
+ verify_score_response $rd $resp 1
+
+ $rd readraw 0
+ }
+
+ test "ZMPOP readraw in RESP$resp" {
+ r del zset{t} zset2{t}
+ create_zset zset3{t} {1 a}
+ create_zset zset4{t} {1 a 2 b 3 c 4 d 5 e}
+
+ r readraw 1
+
+ # ZMPOP against non existing key.
+ verify_nil_response $resp [r zmpop 1 zset{t} min]
+ verify_nil_response $resp [r zmpop 1 zset{t} max count 1]
+ verify_nil_response $resp [r zmpop 2 zset{t} zset2{t} min]
+ verify_nil_response $resp [r zmpop 2 zset{t} zset2{t} max count 1]
+
+ # ZMPOP with one input key.
+ assert_equal {*2} [r zmpop 1 zset3{t} max]
+ assert_equal [r read] {$8}
+ assert_equal [r read] {zset3{t}}
+ assert_equal [r read] {*1}
+ assert_equal [r read] {*2}
+ assert_equal [r read] {$1}
+ assert_equal [r read] {a}
+ verify_score_response r $resp 1
+
+ # ZMPOP with COUNT option.
+ assert_equal {*2} [r zmpop 2 zset3{t} zset4{t} min count 2]
+ assert_equal [r read] {$8}
+ assert_equal [r read] {zset4{t}}
+ assert_equal [r read] {*2}
+ assert_equal [r read] {*2}
+ assert_equal [r read] {$1}
+ assert_equal [r read] {a}
+ verify_score_response r $resp 1
+ assert_equal [r read] {*2}
+ assert_equal [r read] {$1}
+ assert_equal [r read] {b}
+ verify_score_response r $resp 2
+
+ r readraw 0
+ }
+
+ test "BZMPOP readraw in RESP$resp" {
+ r del zset{t} zset2{t}
+ create_zset zset3{t} {1 a 2 b 3 c 4 d 5 e}
+
+ $rd readraw 1
+
+ # BZMPOP released on timeout.
+ $rd bzmpop 0.01 1 zset{t} min
+ verify_nil_response $resp [$rd read]
+ $rd bzmpop 0.01 2 zset{t} zset2{t} max
+ verify_nil_response $resp [$rd read]
+
+ # BZMPOP non-blocking path.
+ $rd bzmpop 0.1 2 zset3{t} zset4{t} min
+
+ assert_equal [$rd read] {*2}
+ assert_equal [$rd read] {$8}
+ assert_equal [$rd read] {zset3{t}}
+ assert_equal [$rd read] {*1}
+ assert_equal [$rd read] {*2}
+ assert_equal [$rd read] {$1}
+ assert_equal [$rd read] {a}
+ verify_score_response $rd $resp 1
+
+ # BZMPOP blocking path with COUNT option.
+ $rd bzmpop 5 2 zset{t} zset2{t} max count 2
+ wait_for_blocked_client
+ r zadd zset2{t} 1 a 2 b 3 c
+
+ assert_equal [$rd read] {*2}
+ assert_equal [$rd read] {$8}
+ assert_equal [$rd read] {zset2{t}}
+ assert_equal [$rd read] {*2}
+ assert_equal [$rd read] {*2}
+ assert_equal [$rd read] {$1}
+ assert_equal [$rd read] {c}
+ verify_score_response $rd $resp 3
+ assert_equal [$rd read] {*2}
+ assert_equal [$rd read] {$1}
+ assert_equal [$rd read] {b}
+ verify_score_response $rd $resp 2
+
+ }
+
+ $rd close
+ r hello 2
+ }
+
+ test {ZINTERSTORE regression with two sets, intset+hashtable} {
+        r del set1{t} set2{t} set3{t}
+ r sadd set1{t} a
+ r sadd set2{t} 10
+ r zinterstore set3{t} 2 set1{t} set2{t}
+ } {0}
+
+ test {ZUNIONSTORE regression, should not create NaN in scores} {
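+        # Weight 0 times a -inf score would be NaN under IEEE rules; Redis
+        # special-cases this to 0, which is what the reply below asserts.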
+ r zadd z{t} -inf neginf
+ r zunionstore out{t} 1 z{t} weights 0
+ r zrange out{t} 0 -1 withscores
+ } {neginf 0}
+
+ test {ZINTERSTORE #516 regression, mixed sets and ziplist zsets} {
+ r sadd one{t} 100 101 102 103
+ r sadd two{t} 100 200 201 202
+ r zadd three{t} 1 500 1 501 1 502 1 503 1 100
+ r zinterstore to_here{t} 3 one{t} two{t} three{t} WEIGHTS 0 0 1
+ r zrange to_here{t} 0 -1
+ } {100}
+
+ test {ZUNIONSTORE result is sorted} {
+ # Create two sets with common and not common elements, perform
+ # the UNION, check that elements are still sorted.
+ r del one{t} two{t} dest{t}
+ set cmd1 [list r zadd one{t}]
+ set cmd2 [list r zadd two{t}]
+ for {set j 0} {$j < 1000} {incr j} {
+ lappend cmd1 [expr rand()] [randomInt 1000]
+ lappend cmd2 [expr rand()] [randomInt 1000]
+ }
+ {*}$cmd1
+ {*}$cmd2
+ assert {[r zcard one{t}] > 100}
+ assert {[r zcard two{t}] > 100}
+ r zunionstore dest{t} 2 one{t} two{t}
+ set oldscore 0
+ foreach {ele score} [r zrange dest{t} 0 -1 withscores] {
+ assert {$score >= $oldscore}
+ set oldscore $score
+ }
+ }
+
+ test "ZUNIONSTORE/ZINTERSTORE/ZDIFFSTORE error if using WITHSCORES " {
+ assert_error "*ERR*syntax*" {r zunionstore foo{t} 2 zsetd{t} zsetf{t} withscores}
+ assert_error "*ERR*syntax*" {r zinterstore foo{t} 2 zsetd{t} zsetf{t} withscores}
+ assert_error "*ERR*syntax*" {r zdiffstore foo{t} 2 zsetd{t} zsetf{t} withscores}
+ }
+
+ test {ZMSCORE retrieve} {
+ r del zmscoretest
+ r zadd zmscoretest 10 x
+ r zadd zmscoretest 20 y
+
+ r zmscore zmscoretest x y
+ } {10 20}
+
+ test {ZMSCORE retrieve from empty set} {
+ r del zmscoretest
+
+ r zmscore zmscoretest x y
+ } {{} {}}
+
+ test {ZMSCORE retrieve with missing member} {
+ r del zmscoretest
+ r zadd zmscoretest 10 x
+
+ r zmscore zmscoretest x y
+ } {10 {}}
+
+ test {ZMSCORE retrieve single member} {
+ r del zmscoretest
+ r zadd zmscoretest 10 x
+ r zadd zmscoretest 20 y
+
+ r zmscore zmscoretest x
+ } {10}
+
+ test {ZMSCORE retrieve requires one or more members} {
+ r del zmscoretest
+ r zadd zmscoretest 10 x
+ r zadd zmscoretest 20 y
+
+ catch {r zmscore zmscoretest} e
+ assert_match {*ERR*wrong*number*arg*} $e
+ }
+
+ test "ZSET commands don't accept the empty strings as valid score" {
+ assert_error "*not*float*" {r zadd myzset "" abc}
+ }
+
+ test "zunionInterDiffGenericCommand at least 1 input key" {
+ assert_error {*at least 1 input key * 'zunion' command} {r zunion 0 key{t}}
+ assert_error {*at least 1 input key * 'zunionstore' command} {r zunionstore dst_key{t} 0 key{t}}
+ assert_error {*at least 1 input key * 'zinter' command} {r zinter 0 key{t}}
+ assert_error {*at least 1 input key * 'zinterstore' command} {r zinterstore dst_key{t} 0 key{t}}
+ assert_error {*at least 1 input key * 'zdiff' command} {r zdiff 0 key{t}}
+ assert_error {*at least 1 input key * 'zdiffstore' command} {r zdiffstore dst_key{t} 0 key{t}}
+ assert_error {*at least 1 input key * 'zintercard' command} {r zintercard 0 key{t}}
+ }
+
+ proc stressers {encoding} {
+ set original_max_entries [lindex [r config get zset-max-ziplist-entries] 1]
+ set original_max_value [lindex [r config get zset-max-ziplist-value] 1]
+ if {$encoding == "listpack"} {
+            # A little extra headroom to allow proper fuzzing in the sorting stresser
+ r config set zset-max-ziplist-entries 256
+ r config set zset-max-ziplist-value 64
+ set elements 128
+ } elseif {$encoding == "skiplist"} {
+ r config set zset-max-ziplist-entries 0
+ r config set zset-max-ziplist-value 0
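+            # A threshold of 0 can never be met, so every zset created below
+            # is forced straight to the skiplist encoding.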
+ if {$::accurate} {set elements 1000} else {set elements 100}
+ } else {
+ puts "Unknown sorted set encoding"
+ exit
+ }
+
+ test "ZSCORE - $encoding" {
+ r del zscoretest
+ set aux {}
+ for {set i 0} {$i < $elements} {incr i} {
+ set score [expr rand()]
+ lappend aux $score
+ r zadd zscoretest $score $i
+ }
+
+ assert_encoding $encoding zscoretest
+ for {set i 0} {$i < $elements} {incr i} {
+                # If an IEEE 754 double is converted to a decimal string with
+                # at least 17 significant digits (as in the ZSCORE reply) and
+                # then parsed back to double precision, the round trip is
+                # exact, so the score ZSCORE returns must match the original
+                # number stored in the $aux list. Since Tcl is mostly relaxed
+                # about types (everything is a string), we use expr to coerce
+                # both sides to floats before comparing.
+ assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]]
+ }
+ }
+
+ test "ZMSCORE - $encoding" {
+ r del zscoretest
+ set aux {}
+ for {set i 0} {$i < $elements} {incr i} {
+ set score [expr rand()]
+ lappend aux $score
+ r zadd zscoretest $score $i
+ }
+
+ assert_encoding $encoding zscoretest
+ for {set i 0} {$i < $elements} {incr i} {
+ # Check above notes on IEEE 754 double-precision comparison
+                assert_equal [expr [lindex $aux $i]] [expr [r zmscore zscoretest $i]]
+ }
+ }
+
+ test "ZSCORE after a DEBUG RELOAD - $encoding" {
+ r del zscoretest
+ set aux {}
+ for {set i 0} {$i < $elements} {incr i} {
+ set score [expr rand()]
+ lappend aux $score
+ r zadd zscoretest $score $i
+ }
+
+ r debug reload
+ assert_encoding $encoding zscoretest
+ for {set i 0} {$i < $elements} {incr i} {
+ # Check above notes on IEEE 754 double-precision comparison
+ assert_equal [expr [lindex $aux $i]] [expr [r zscore zscoretest $i]]
+ }
+ } {} {needs:debug}
+
+ test "ZSET sorting stresser - $encoding" {
+ set delta 0
+ for {set test 0} {$test < 2} {incr test} {
+ unset -nocomplain auxarray
+ array set auxarray {}
+ set auxlist {}
+ r del myzset
+ for {set i 0} {$i < $elements} {incr i} {
+ if {$test == 0} {
+ set score [expr rand()]
+ } else {
+ set score [expr int(rand()*10)]
+ }
+ set auxarray($i) $score
+ r zadd myzset $score $i
+ # Random update
+ if {[expr rand()] < .2} {
+ set j [expr int(rand()*1000)]
+ if {$test == 0} {
+ set score [expr rand()]
+ } else {
+ set score [expr int(rand()*10)]
+ }
+ set auxarray($j) $score
+ r zadd myzset $score $j
+ }
+ }
+ foreach {item score} [array get auxarray] {
+ lappend auxlist [list $score $item]
+ }
+ set sorted [lsort -command zlistAlikeSort $auxlist]
+ set auxlist {}
+ foreach x $sorted {
+ lappend auxlist [lindex $x 1]
+ }
+
+ assert_encoding $encoding myzset
+ set fromredis [r zrange myzset 0 -1]
+ set delta 0
+ for {set i 0} {$i < [llength $fromredis]} {incr i} {
+ if {[lindex $fromredis $i] != [lindex $auxlist $i]} {
+ incr delta
+ }
+ }
+ }
+ assert_equal 0 $delta
+ }
+
+ test "ZRANGEBYSCORE fuzzy test, 100 ranges in $elements element sorted set - $encoding" {
+ set err {}
+ r del zset
+ for {set i 0} {$i < $elements} {incr i} {
+ r zadd zset [expr rand()] $i
+ }
+
+ assert_encoding $encoding zset
+ for {set i 0} {$i < 100} {incr i} {
+ set min [expr rand()]
+ set max [expr rand()]
+ if {$min > $max} {
+ set aux $min
+ set min $max
+ set max $aux
+ }
+ set low [r zrangebyscore zset -inf $min]
+ set ok [r zrangebyscore zset $min $max]
+ set high [r zrangebyscore zset $max +inf]
+ set lowx [r zrangebyscore zset -inf ($min]
+ set okx [r zrangebyscore zset ($min ($max]
+ set highx [r zrangebyscore zset ($max +inf]
+
+ if {[r zcount zset -inf $min] != [llength $low]} {
+ append err "Error, len does not match zcount\n"
+ }
+ if {[r zcount zset $min $max] != [llength $ok]} {
+ append err "Error, len does not match zcount\n"
+ }
+ if {[r zcount zset $max +inf] != [llength $high]} {
+ append err "Error, len does not match zcount\n"
+ }
+ if {[r zcount zset -inf ($min] != [llength $lowx]} {
+ append err "Error, len does not match zcount\n"
+ }
+ if {[r zcount zset ($min ($max] != [llength $okx]} {
+ append err "Error, len does not match zcount\n"
+ }
+ if {[r zcount zset ($max +inf] != [llength $highx]} {
+ append err "Error, len does not match zcount\n"
+ }
+
+ foreach x $low {
+ set score [r zscore zset $x]
+ if {$score > $min} {
+ append err "Error, score for $x is $score > $min\n"
+ }
+ }
+ foreach x $lowx {
+ set score [r zscore zset $x]
+ if {$score >= $min} {
+ append err "Error, score for $x is $score >= $min\n"
+ }
+ }
+ foreach x $ok {
+ set score [r zscore zset $x]
+ if {$score < $min || $score > $max} {
+ append err "Error, score for $x is $score outside $min-$max range\n"
+ }
+ }
+ foreach x $okx {
+ set score [r zscore zset $x]
+ if {$score <= $min || $score >= $max} {
+ append err "Error, score for $x is $score outside $min-$max open range\n"
+ }
+ }
+ foreach x $high {
+ set score [r zscore zset $x]
+ if {$score < $max} {
+ append err "Error, score for $x is $score < $max\n"
+ }
+ }
+ foreach x $highx {
+ set score [r zscore zset $x]
+ if {$score <= $max} {
+ append err "Error, score for $x is $score <= $max\n"
+ }
+ }
+ }
+ assert_equal {} $err
+ }
+
+ test "ZRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" {
+ set lexset {}
+ r del zset
+ for {set j 0} {$j < $elements} {incr j} {
+ set e [randstring 0 30 alpha]
+ lappend lexset $e
+ r zadd zset 0 $e
+ }
+ set lexset [lsort -unique $lexset]
+ for {set j 0} {$j < 100} {incr j} {
+ set min [randstring 0 30 alpha]
+ set max [randstring 0 30 alpha]
+ set mininc [randomInt 2]
+ set maxinc [randomInt 2]
+ if {$mininc} {set cmin "\[$min"} else {set cmin "($min"}
+ if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"}
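+                # "[" marks an inclusive bound and "(" an exclusive one, per
+                # the ZRANGEBYLEX min/max syntax ("-" and "+" being the open
+                # endpoints).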
+ set rev [randomInt 2]
+ if {$rev} {
+ set cmd zrevrangebylex
+ } else {
+ set cmd zrangebylex
+ }
+
+                # Make sure data is the same on both sides
+ assert {[r zrange zset 0 -1] eq $lexset}
+
+ # Get the Redis output
+ set output [r $cmd zset $cmin $cmax]
+ if {$rev} {
+ set outlen [r zlexcount zset $cmax $cmin]
+ } else {
+ set outlen [r zlexcount zset $cmin $cmax]
+ }
+
+ # Compute the same output via Tcl
+ set o {}
+ set copy $lexset
+ if {(!$rev && [string compare $min $max] > 0) ||
+ ($rev && [string compare $max $min] > 0)} {
+ # Empty output when ranges are inverted.
+ } else {
+ if {$rev} {
+                        # Invert the Tcl list using Redis itself.
+ set copy [r zrevrange zset 0 -1]
+ # Invert min / max as well
+ lassign [list $min $max $mininc $maxinc] \
+ max min maxinc mininc
+ }
+ foreach e $copy {
+ set mincmp [string compare $e $min]
+ set maxcmp [string compare $e $max]
+ if {
+ ($mininc && $mincmp >= 0 || !$mininc && $mincmp > 0)
+ &&
+ ($maxinc && $maxcmp <= 0 || !$maxinc && $maxcmp < 0)
+ } {
+ lappend o $e
+ }
+ }
+ }
+ assert {$o eq $output}
+ assert {$outlen eq [llength $output]}
+ }
+ }
+
+ test "ZREMRANGEBYLEX fuzzy test, 100 ranges in $elements element sorted set - $encoding" {
+ set lexset {}
+ r del zset{t} zsetcopy{t}
+ for {set j 0} {$j < $elements} {incr j} {
+ set e [randstring 0 30 alpha]
+ lappend lexset $e
+ r zadd zset{t} 0 $e
+ }
+ set lexset [lsort -unique $lexset]
+ for {set j 0} {$j < 100} {incr j} {
+ # Copy...
+ r zunionstore zsetcopy{t} 1 zset{t}
+ set lexsetcopy $lexset
+
+ set min [randstring 0 30 alpha]
+ set max [randstring 0 30 alpha]
+ set mininc [randomInt 2]
+ set maxinc [randomInt 2]
+ if {$mininc} {set cmin "\[$min"} else {set cmin "($min"}
+ if {$maxinc} {set cmax "\[$max"} else {set cmax "($max"}
+
+                # Make sure data is the same on both sides
+ assert {[r zrange zset{t} 0 -1] eq $lexset}
+
+ # Get the range we are going to remove
+ set torem [r zrangebylex zset{t} $cmin $cmax]
+ set toremlen [r zlexcount zset{t} $cmin $cmax]
+ r zremrangebylex zsetcopy{t} $cmin $cmax
+ set output [r zrange zsetcopy{t} 0 -1]
+
+ # Remove the range with Tcl from the original list
+ if {$toremlen} {
+ set first [lsearch -exact $lexsetcopy [lindex $torem 0]]
+ set last [expr {$first+$toremlen-1}]
+ set lexsetcopy [lreplace $lexsetcopy $first $last]
+ }
+ assert {$lexsetcopy eq $output}
+ }
+ }
+
+ test "ZSETs skiplist implementation backlink consistency test - $encoding" {
+ set diff 0
+ for {set j 0} {$j < $elements} {incr j} {
+ r zadd myzset [expr rand()] "Element-$j"
+ r zrem myzset "Element-[expr int(rand()*$elements)]"
+ }
+
+ assert_encoding $encoding myzset
+ set l1 [r zrange myzset 0 -1]
+ set l2 [r zrevrange myzset 0 -1]
+ for {set j 0} {$j < [llength $l1]} {incr j} {
+ if {[lindex $l1 $j] ne [lindex $l2 end-$j]} {
+ incr diff
+ }
+ }
+ assert_equal 0 $diff
+ }
+
+ test "ZSETs ZRANK augmented skip list stress testing - $encoding" {
+ set err {}
+ r del myzset
+ for {set k 0} {$k < 2000} {incr k} {
+ set i [expr {$k % $elements}]
+ if {[expr rand()] < .2} {
+ r zrem myzset $i
+ } else {
+ set score [expr rand()]
+ r zadd myzset $score $i
+ assert_encoding $encoding myzset
+ }
+
+ set card [r zcard myzset]
+ if {$card > 0} {
+ set index [randomInt $card]
+ set ele [lindex [r zrange myzset $index $index] 0]
+ set rank [r zrank myzset $ele]
+ if {$rank != $index} {
+ set err "$ele RANK is wrong! ($rank != $index)"
+ break
+ }
+ }
+ }
+ assert_equal {} $err
+ }
+
+ foreach {pop} {BZPOPMIN BZMPOP_MIN} {
+ test "$pop, ZADD + DEL should not awake blocked client" {
+ set rd [redis_deferring_client]
+ r del zset
+
+ bzpop_command $rd $pop zset 0
+ wait_for_blocked_client
+
+ r multi
+ r zadd zset 0 foo
+ r del zset
+ r exec
+ r del zset
+ r zadd zset 1 bar
+
+ verify_pop_response $pop [$rd read] {zset bar 1} {zset {{bar 1}}}
+ $rd close
+ }
+
+ test "$pop, ZADD + DEL + SET should not awake blocked client" {
+ set rd [redis_deferring_client]
+ r del zset
+
+ bzpop_command $rd $pop zset 0
+ wait_for_blocked_client
+
+ r multi
+ r zadd zset 0 foo
+ r del zset
+ r set zset foo
+ r exec
+ r del zset
+ r zadd zset 1 bar
+
+ verify_pop_response $pop [$rd read] {zset bar 1} {zset {{bar 1}}}
+ $rd close
+ }
+ }
+
+ test "BZPOPMIN with same key multiple times should work" {
+ set rd [redis_deferring_client]
+ r del z1{t} z2{t}
+
+ # Data arriving after the BZPOPMIN.
+ $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0
+ wait_for_blocked_client
+ r zadd z1{t} 0 a
+ assert_equal [$rd read] {z1{t} a 0}
+ $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0
+ wait_for_blocked_client
+ r zadd z2{t} 1 b
+ assert_equal [$rd read] {z2{t} b 1}
+
+ # Data already there.
+ r zadd z1{t} 0 a
+ r zadd z2{t} 1 b
+ $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0
+ assert_equal [$rd read] {z1{t} a 0}
+ $rd bzpopmin z1{t} z2{t} z2{t} z1{t} 0
+ assert_equal [$rd read] {z2{t} b 1}
+ $rd close
+ }
+
+ foreach {pop} {BZPOPMIN BZMPOP_MIN} {
+ test "MULTI/EXEC is isolated from the point of view of $pop" {
+ set rd [redis_deferring_client]
+ r del zset
+
+ bzpop_command $rd $pop zset 0
+ wait_for_blocked_client
+
+ r multi
+ r zadd zset 0 a
+ r zadd zset 1 b
+ r zadd zset 2 c
+ r exec
+
+ verify_pop_response $pop [$rd read] {zset a 0} {zset {{a 0}}}
+ $rd close
+ }
+
+ test "$pop with variadic ZADD" {
+ set rd [redis_deferring_client]
+ r del zset
+ if {$::valgrind} {after 100}
+ bzpop_command $rd $pop zset 0
+ wait_for_blocked_client
+ if {$::valgrind} {after 100}
+ assert_equal 2 [r zadd zset -1 foo 1 bar]
+ if {$::valgrind} {after 100}
+ verify_pop_response $pop [$rd read] {zset foo -1} {zset {{foo -1}}}
+ assert_equal {bar} [r zrange zset 0 -1]
+ $rd close
+ }
+
+ test "$pop with zero timeout should block indefinitely" {
+ set rd [redis_deferring_client]
+ r del zset
+ bzpop_command $rd $pop zset 0
+ wait_for_blocked_client
+ after 1000
+ r zadd zset 0 foo
+ verify_pop_response $pop [$rd read] {zset foo 0} {zset {{foo 0}}}
+ $rd close
+ }
+ }
+
+ r config set zset-max-ziplist-entries $original_max_entries
+ r config set zset-max-ziplist-value $original_max_value
+ }
+
+ tags {"slow"} {
+ stressers listpack
+ stressers skiplist
+ }
+
+ test "BZPOP/BZMPOP against wrong type" {
+ r set foo{t} bar
+ assert_error "*WRONGTYPE*" {r bzpopmin foo{t} 1}
+ assert_error "*WRONGTYPE*" {r bzpopmax foo{t} 1}
+
+ assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} min}
+ assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} max}
+ assert_error "*WRONGTYPE*" {r bzmpop 1 1 foo{t} min count 10}
+
+ r del foo{t}
+ r set foo2{t} bar
+ assert_error "*WRONGTYPE*" {r bzmpop 1 2 foo{t} foo2{t} min}
+ assert_error "*WRONGTYPE*" {r bzmpop 1 2 foo2{t} foo{t} max count 1}
+ }
+
+ test "BZMPOP with illegal argument" {
+ assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop}
+ assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop 0 1}
+ assert_error "ERR wrong number of arguments for 'bzmpop' command" {r bzmpop 0 1 myzset{t}}
+
+ assert_error "ERR numkeys*" {r bzmpop 1 0 myzset{t} MIN}
+ assert_error "ERR numkeys*" {r bzmpop 1 a myzset{t} MIN}
+ assert_error "ERR numkeys*" {r bzmpop 1 -1 myzset{t} MAX}
+
+ assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} bad_where}
+ assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MIN bar_arg}
+ assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MAX MIN}
+ assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} COUNT}
+ assert_error "ERR syntax error*" {r bzmpop 1 1 myzset{t} MIN COUNT 1 COUNT 2}
+ assert_error "ERR syntax error*" {r bzmpop 1 2 myzset{t} myzset2{t} bad_arg}
+
+ assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MIN COUNT 0}
+ assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MAX COUNT a}
+ assert_error "ERR count*" {r bzmpop 1 1 myzset{t} MIN COUNT -1}
+ assert_error "ERR count*" {r bzmpop 1 2 myzset{t} myzset2{t} MAX COUNT -1}
+ }
+
+ test "BZMPOP with multiple blocked clients" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ set rd3 [redis_deferring_client]
+ set rd4 [redis_deferring_client]
+ r del myzset{t} myzset2{t}
+
+ $rd1 bzmpop 0 2 myzset{t} myzset2{t} min count 1
+ wait_for_blocked_clients_count 1
+ $rd2 bzmpop 0 2 myzset{t} myzset2{t} max count 10
+ wait_for_blocked_clients_count 2
+ $rd3 bzmpop 0 2 myzset{t} myzset2{t} min count 10
+ wait_for_blocked_clients_count 3
+ $rd4 bzmpop 0 2 myzset{t} myzset2{t} max count 1
+ wait_for_blocked_clients_count 4
+
+ r multi
+ r zadd myzset{t} 1 a 2 b 3 c 4 d 5 e
+ r zadd myzset2{t} 1 a 2 b 3 c 4 d 5 e
+ r exec
+
+ assert_equal {myzset{t} {{a 1}}} [$rd1 read]
+ assert_equal {myzset{t} {{e 5} {d 4} {c 3} {b 2}}} [$rd2 read]
+ assert_equal {myzset2{t} {{a 1} {b 2} {c 3} {d 4} {e 5}}} [$rd3 read]
+
+ r zadd myzset2{t} 1 a 2 b 3 c
+ assert_equal {myzset2{t} {{c 3}}} [$rd4 read]
+
+ r del myzset{t} myzset2{t}
+ $rd1 close
+ $rd2 close
+ $rd3 close
+ $rd4 close
+ }
+
+ test "BZMPOP propagate as pop with count command to replica" {
+ set rd [redis_deferring_client]
+ set repl [attach_to_replication_stream]
+
+ # BZMPOP without being blocked.
+ r zadd myzset{t} 1 one 2 two 3 three
+ r zadd myzset2{t} 4 four 5 five 6 six
+ r bzmpop 0 1 myzset{t} min
+ r bzmpop 0 2 myzset{t} myzset2{t} max count 10
+ r bzmpop 0 2 myzset{t} myzset2{t} max count 10
+
+ # BZMPOP that gets blocked.
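+        # A blocked BZMPOP propagates only once it is served, again as a plain
+        # ZPOPMIN/ZPOPMAX carrying the number of elements actually popped.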
+ $rd bzmpop 0 1 myzset{t} min count 1
+ wait_for_blocked_client
+ r zadd myzset{t} 1 one
+ $rd bzmpop 0 2 myzset{t} myzset2{t} min count 5
+ wait_for_blocked_client
+ r zadd myzset{t} 1 one 2 two 3 three
+ $rd bzmpop 0 2 myzset{t} myzset2{t} max count 10
+ wait_for_blocked_client
+ r zadd myzset2{t} 4 four 5 five 6 six
+
+ # Released on timeout.
+ assert_equal {} [r bzmpop 0.01 1 myzset{t} max count 10]
+ r set foo{t} bar ;# something else to propagate after, so we can make sure the above pop didn't.
+
+ $rd close
+
+ assert_replication_stream $repl {
+ {select *}
+ {zadd myzset{t} 1 one 2 two 3 three}
+ {zadd myzset2{t} 4 four 5 five 6 six}
+ {zpopmin myzset{t} 1}
+ {zpopmax myzset{t} 2}
+ {zpopmax myzset2{t} 3}
+ {zadd myzset{t} 1 one}
+ {zpopmin myzset{t} 1}
+ {zadd myzset{t} 1 one 2 two 3 three}
+ {zpopmin myzset{t} 3}
+ {zadd myzset2{t} 4 four 5 five 6 six}
+ {zpopmax myzset2{t} 3}
+ {set foo{t} bar}
+ }
+ close_replication_stream $repl
+ } {} {needs:repl}
+
+ test "BZMPOP should not blocks on non key arguments - #10762" {
+ set rd1 [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+ r del myzset myzset2 myzset3
+
+ $rd1 bzmpop 0 1 myzset min count 10
+ wait_for_blocked_clients_count 1
+ $rd2 bzmpop 0 2 myzset2 myzset3 max count 10
+ wait_for_blocked_clients_count 2
+
+        # Writes to keys that merely share names with non-key argument tokens
+        # (the timeout, numkeys, min/max and count values) must not unblock the clients.
+ r zadd 0 100 timeout_value
+ r zadd 1 200 numkeys_value
+ r zadd min 300 min_token
+ r zadd max 400 max_token
+ r zadd count 500 count_token
+ r zadd 10 600 count_value
+
+ r zadd myzset 1 zset
+ r zadd myzset3 1 zset3
+ assert_equal {myzset {{zset 1}}} [$rd1 read]
+ assert_equal {myzset3 {{zset3 1}}} [$rd2 read]
+
+ $rd1 close
+ $rd2 close
+ } {0} {cluster:skip}
+
+ test {ZSET skiplist order consistency when elements are moved} {
+ set original_max [lindex [r config get zset-max-ziplist-entries] 1]
+ r config set zset-max-ziplist-entries 0
+ for {set times 0} {$times < 10} {incr times} {
+ r del zset
+ for {set j 0} {$j < 1000} {incr j} {
+ r zadd zset [randomInt 50] ele-[randomInt 10]
+ }
+
+ # Make sure that element ordering is correct
+ set prev_element {}
+ set prev_score -1
+ foreach {element score} [r zrange zset 0 -1 WITHSCORES] {
+                # Assert that elements are in increasing order
+ assert {
+ $prev_score < $score ||
+ ($prev_score == $score &&
+ [string compare $prev_element $element] == -1)
+ }
+ set prev_element $element
+ set prev_score $score
+ }
+ }
+ r config set zset-max-ziplist-entries $original_max
+ }
+
+ test {ZRANGESTORE basic} {
+ r flushall
+ r zadd z1{t} 1 a 2 b 3 c 4 d
+ set res [r zrangestore z2{t} z1{t} 0 -1]
+ assert_equal $res 4
+ r zrange z2{t} 0 -1 withscores
+ } {a 1 b 2 c 3 d 4}
+
+ test {ZRANGESTORE RESP3} {
+ r hello 3
+ assert_equal [r zrange z2{t} 0 -1 withscores] {{a 1.0} {b 2.0} {c 3.0} {d 4.0}}
+ r hello 2
+ }
+
+ test {ZRANGESTORE range} {
+ set res [r zrangestore z2{t} z1{t} 1 2]
+ assert_equal $res 2
+ r zrange z2{t} 0 -1 withscores
+ } {b 2 c 3}
+
+ test {ZRANGESTORE BYLEX} {
+ set res [r zrangestore z2{t} z1{t} \[b \[c BYLEX]
+ assert_equal $res 2
+ r zrange z2{t} 0 -1 withscores
+ } {b 2 c 3}
+
+ test {ZRANGESTORE BYSCORE} {
+ set res [r zrangestore z2{t} z1{t} 1 2 BYSCORE]
+ assert_equal $res 2
+ r zrange z2{t} 0 -1 withscores
+ } {a 1 b 2}
+
+ test {ZRANGESTORE BYSCORE LIMIT} {
+ set res [r zrangestore z2{t} z1{t} 0 5 BYSCORE LIMIT 0 2]
+ assert_equal $res 2
+ r zrange z2{t} 0 -1 withscores
+ } {a 1 b 2}
+
+ test {ZRANGESTORE BYSCORE REV LIMIT} {
+ set res [r zrangestore z2{t} z1{t} 5 0 BYSCORE REV LIMIT 0 2]
+ assert_equal $res 2
+ r zrange z2{t} 0 -1 withscores
+ } {c 3 d 4}
+
+ test {ZRANGE BYSCORE REV LIMIT} {
+ r zrange z1{t} 5 0 BYSCORE REV LIMIT 0 2 WITHSCORES
+ } {d 4 c 3}
+
+ test {ZRANGESTORE - src key missing} {
+ set res [r zrangestore z2{t} missing{t} 0 -1]
+ assert_equal $res 0
+ r exists z2{t}
+ } {0}
+
+ test {ZRANGESTORE - src key wrong type} {
+ r zadd z2{t} 1 a
+ r set foo{t} bar
+ assert_error "*WRONGTYPE*" {r zrangestore z2{t} foo{t} 0 -1}
+ r zrange z2{t} 0 -1
+ } {a}
+
+ test {ZRANGESTORE - empty range} {
+ set res [r zrangestore z2{t} z1{t} 5 6]
+ assert_equal $res 0
+ r exists z2{t}
+ } {0}
+
+ test {ZRANGESTORE BYLEX - empty range} {
+ set res [r zrangestore z2{t} z1{t} \[f \[g BYLEX]
+ assert_equal $res 0
+ r exists z2{t}
+ } {0}
+
+ test {ZRANGESTORE BYSCORE - empty range} {
+ set res [r zrangestore z2{t} z1{t} 5 6 BYSCORE]
+ assert_equal $res 0
+ r exists z2{t}
+ } {0}
+
+ test {ZRANGE BYLEX} {
+ r zrange z1{t} \[b \[c BYLEX
+ } {b c}
+
+ test {ZRANGESTORE invalid syntax} {
+ catch {r zrangestore z2{t} z1{t} 0 -1 limit 1 2} err
+ assert_match "*syntax*" $err
+ catch {r zrangestore z2{t} z1{t} 0 -1 WITHSCORES} err
+ assert_match "*syntax*" $err
+ }
+
+ test {ZRANGESTORE with zset-max-listpack-entries 0 #10767 case} {
+ set original_max [lindex [r config get zset-max-listpack-entries] 1]
+ r config set zset-max-listpack-entries 0
+ r del z1{t} z2{t}
+ r zadd z1{t} 1 a
+ assert_encoding skiplist z1{t}
+ assert_equal 1 [r zrangestore z2{t} z1{t} 0 -1]
+ assert_encoding skiplist z2{t}
+ r config set zset-max-listpack-entries $original_max
+ }
+
+ test {ZRANGESTORE with zset-max-listpack-entries 1 dst key should use skiplist encoding} {
+ set original_max [lindex [r config get zset-max-listpack-entries] 1]
+ r config set zset-max-listpack-entries 1
+ r del z1{t} z2{t} z3{t}
+ r zadd z1{t} 1 a 2 b
+ assert_equal 1 [r zrangestore z2{t} z1{t} 0 0]
+ assert_encoding listpack z2{t}
+ assert_equal 2 [r zrangestore z3{t} z1{t} 0 1]
+ assert_encoding skiplist z3{t}
+ r config set zset-max-listpack-entries $original_max
+ }
+
+ test {ZRANGE invalid syntax} {
+ catch {r zrange z1{t} 0 -1 limit 1 2} err
+ assert_match "*syntax*" $err
+ catch {r zrange z1{t} 0 -1 BYLEX WITHSCORES} err
+ assert_match "*syntax*" $err
+ catch {r zrevrange z1{t} 0 -1 BYSCORE} err
+ assert_match "*syntax*" $err
+ catch {r zrangebyscore z1{t} 0 -1 REV} err
+ assert_match "*syntax*" $err
+ }
+
+ proc get_keys {l} {
+ set res {}
+ foreach {score key} $l {
+ lappend res $key
+ }
+ return $res
+ }
+
+    # Check that the returned members belong to the zset
+ proc check_member {mydict res} {
+ foreach ele $res {
+ assert {[dict exists $mydict $ele]}
+ }
+ }
+
+    # Check that the returned member/score pairs match the zset
+ proc check_member_and_score {mydict res} {
+ foreach {key val} $res {
+ assert_equal $val [dict get $mydict $key]
+ }
+ }
+
+ foreach {type contents} "listpack {1 a 2 b 3 c} skiplist {1 a 2 b 3 [randstring 70 90 alpha]}" {
+ set original_max_value [lindex [r config get zset-max-ziplist-value] 1]
+ r config set zset-max-ziplist-value 10
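+        # The skiplist variant's long random member (70-90 chars) exceeds the
+        # 10-char value threshold above, forcing that encoding.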
+ create_zset myzset $contents
+ assert_encoding $type myzset
+
+ test "ZRANDMEMBER - $type" {
+ unset -nocomplain myzset
+ array set myzset {}
+ for {set i 0} {$i < 100} {incr i} {
+ set key [r zrandmember myzset]
+ set myzset($key) 1
+ }
+ assert_equal [lsort [get_keys $contents]] [lsort [array names myzset]]
+ }
+ r config set zset-max-ziplist-value $original_max_value
+ }
+
+ test "ZRANDMEMBER with RESP3" {
+ r hello 3
+ set res [r zrandmember myzset 3 withscores]
+ assert_equal [llength $res] 3
+ assert_equal [llength [lindex $res 1]] 2
+
+ set res [r zrandmember myzset 3]
+ assert_equal [llength $res] 3
+ assert_equal [llength [lindex $res 1]] 1
+ r hello 2
+ }
+
+ test "ZRANDMEMBER count of 0 is handled correctly" {
+ r zrandmember myzset 0
+ } {}
+
+ test "ZRANDMEMBER with <count> against non existing key" {
+ r zrandmember nonexisting_key 100
+ } {}
+
+ test "ZRANDMEMBER count overflow" {
+ r zadd myzset 0 a
+ assert_error {*value is out of range*} {r zrandmember myzset -9223372036854770000 withscores}
+ assert_error {*value is out of range*} {r zrandmember myzset -9223372036854775808 withscores}
+ assert_error {*value is out of range*} {r zrandmember myzset -9223372036854775808}
+ } {}
+
+ # Make sure we can distinguish between an empty array and a null response
+ r readraw 1
+
+ test "ZRANDMEMBER count of 0 is handled correctly - emptyarray" {
+ r zrandmember myzset 0
+ } {*0}
+
+ test "ZRANDMEMBER with <count> against non existing key - emptyarray" {
+ r zrandmember nonexisting_key 100
+ } {*0}
+
+ r readraw 0
+
+ foreach {type contents} "
+ skiplist {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 [randstring 70 90 alpha]}
+ listpack {1 a 2 b 3 c 4 d 5 e 6 f 7 g 7 h 9 i 10 j} " {
+ test "ZRANDMEMBER with <count> - $type" {
+ set original_max_value [lindex [r config get zset-max-ziplist-value] 1]
+ r config set zset-max-ziplist-value 10
+ create_zset myzset $contents
+ assert_encoding $type myzset
+
+ # create a dict for easy lookup
+ set mydict [dict create {*}[r zrange myzset 0 -1 withscores]]
+
+            # We'll stress different parts of the code; see the implementation
+            # of ZRANDMEMBER for more information. There are basically four
+            # different code paths.
+
+ # PATH 1: Use negative count.
+
+ # 1) Check that it returns repeated elements with and without values.
+ # 2) Check that all the elements actually belong to the original zset.
+ set res [r zrandmember myzset -20]
+ assert_equal [llength $res] 20
+ check_member $mydict $res
+
+ set res [r zrandmember myzset -1001]
+ assert_equal [llength $res] 1001
+ check_member $mydict $res
+
+ # again with WITHSCORES
+ set res [r zrandmember myzset -20 withscores]
+ assert_equal [llength $res] 40
+ check_member_and_score $mydict $res
+
+ set res [r zrandmember myzset -1001 withscores]
+ assert_equal [llength $res] 2002
+ check_member_and_score $mydict $res
+
+ # Test random uniform distribution
+ # df = 9, 40 means 0.00001 probability
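+            # With 10 distinct members the chi-square statistic has 10-1 = 9
+            # degrees of freedom; under a uniform distribution a value above
+            # ~40 occurs with probability about 0.00001, so false failures
+            # are very rare.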
+ set res [r zrandmember myzset -1000]
+ assert_lessthan [chi_square_value $res] 40
+ check_member $mydict $res
+
+ # 3) Check that eventually all the elements are returned.
+ # Use both WITHSCORES and without
+ unset -nocomplain auxset
+ set iterations 1000
+ while {$iterations != 0} {
+ incr iterations -1
+ if {[expr {$iterations % 2}] == 0} {
+ set res [r zrandmember myzset -3 withscores]
+ foreach {key val} $res {
+ dict append auxset $key $val
+ }
+ } else {
+ set res [r zrandmember myzset -3]
+ foreach key $res {
+ dict append auxset $key
+ }
+ }
+ if {[lsort [dict keys $mydict]] eq
+ [lsort [dict keys $auxset]]} {
+ break;
+ }
+ }
+ assert {$iterations != 0}
+
+            # PATH 2: positive count (unique behavior) with a requested size
+            # equal to or greater than the set size.
+ foreach size {10 20} {
+ set res [r zrandmember myzset $size]
+ assert_equal [llength $res] 10
+ assert_equal [lsort $res] [lsort [dict keys $mydict]]
+ check_member $mydict $res
+
+ # again with WITHSCORES
+ set res [r zrandmember myzset $size withscores]
+ assert_equal [llength $res] 20
+ assert_equal [lsort $res] [lsort $mydict]
+ check_member_and_score $mydict $res
+ }
+
+            # PATH 3: Ask for almost as many elements as there are in the set.
+ # In this case the implementation will duplicate the original
+ # set and will remove random elements up to the requested size.
+ #
+            # PATH 4: Ask for a number of elements well below the set size.
+            #
+            # We can exercise both code paths with the same code, varying only
+            # the requested size.
+ foreach size {1 2 8} {
+ # 1) Check that all the elements actually belong to the
+ # original set.
+ set res [r zrandmember myzset $size]
+ assert_equal [llength $res] $size
+ check_member $mydict $res
+
+ # again with WITHSCORES
+ set res [r zrandmember myzset $size withscores]
+ assert_equal [llength $res] [expr {$size * 2}]
+ check_member_and_score $mydict $res
+
+ # 2) Check that eventually all the elements are returned.
+ # Use both WITHSCORES and without
+ unset -nocomplain auxset
+ unset -nocomplain allkey
+ set iterations [expr {1000 / $size}]
+ set all_ele_return false
+ while {$iterations != 0} {
+ incr iterations -1
+ if {[expr {$iterations % 2}] == 0} {
+ set res [r zrandmember myzset $size withscores]
+ foreach {key value} $res {
+ dict append auxset $key $value
+ lappend allkey $key
+ }
+ } else {
+ set res [r zrandmember myzset $size]
+ foreach key $res {
+ dict append auxset $key
+ lappend allkey $key
+ }
+ }
+ if {[lsort [dict keys $mydict]] eq
+ [lsort [dict keys $auxset]]} {
+ set all_ele_return true
+ }
+ }
+ assert_equal $all_ele_return true
+ # df = 9, 40 means 0.00001 probability
+ assert_lessthan [chi_square_value $allkey] 40
+ }
+ }
+ r config set zset-max-ziplist-value $original_max_value
+ }
+
+ test {zset score double range} {
+ set dblmax 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.00000000000000000
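+        # The literal above is DBL_MAX written out in decimal; ZSCORE replies
+        # with its 17-significant-digit form, 1.7976931348623157e+308.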
+ r del zz
+ r zadd zz $dblmax dblmax
+ assert_encoding listpack zz
+ r zscore zz dblmax
+ } {1.7976931348623157e+308}
+
+ test {zunionInterDiffGenericCommand acts on SET and ZSET} {
+ r del set_small{t} set_big{t} zset_small{t} zset_big{t} zset_dest{t}
+
+ foreach set_type {intset listpack hashtable} {
+ # Restore all default configurations before each round of testing.
+ r config set set-max-intset-entries 512
+ r config set set-max-listpack-entries 128
+ r config set zset-max-listpack-entries 128
+
+ r del set_small{t} set_big{t}
+
+ if {$set_type == "intset"} {
+ r sadd set_small{t} 1 2 3
+ r sadd set_big{t} 1 2 3 4 5
+ assert_encoding intset set_small{t}
+ assert_encoding intset set_big{t}
+ } elseif {$set_type == "listpack"} {
+ # Add an "a" and then remove it, make sure the set is listpack encoding.
+ r sadd set_small{t} a 1 2 3
+ r sadd set_big{t} a 1 2 3 4 5
+ r srem set_small{t} a
+ r srem set_big{t} a
+ assert_encoding listpack set_small{t}
+ assert_encoding listpack set_big{t}
+ } elseif {$set_type == "hashtable"} {
+ r config set set-max-intset-entries 0
+ r config set set-max-listpack-entries 0
+ r sadd set_small{t} 1 2 3
+ r sadd set_big{t} 1 2 3 4 5
+ assert_encoding hashtable set_small{t}
+ assert_encoding hashtable set_big{t}
+ }
+
+ foreach zset_type {listpack skiplist} {
+ r del zset_small{t} zset_big{t}
+
+ if {$zset_type == "listpack"} {
+ r zadd zset_small{t} 1 1 2 2 3 3
+ r zadd zset_big{t} 1 1 2 2 3 3 4 4 5 5
+ assert_encoding listpack zset_small{t}
+ assert_encoding listpack zset_big{t}
+ } elseif {$zset_type == "skiplist"} {
+ r config set zset-max-listpack-entries 0
+ r zadd zset_small{t} 1 1 2 2 3 3
+ r zadd zset_big{t} 1 1 2 2 3 3 4 4 5 5
+ assert_encoding skiplist zset_small{t}
+ assert_encoding skiplist zset_big{t}
+ }
+
+                # Test with one big key and one small key, in both orders.
+                # The implementation sorts the input sets from smallest to
+                # largest, so covering both orders exercises more code paths.
+ foreach {small_or_big set_key zset_key} {
+ small set_small{t} zset_big{t}
+ big set_big{t} zset_small{t}
+ } {
+                    # The results of these commands do not depend on the order of the keys.
+ assert_equal {1 2 3 4 5} [lsort [r zunion 2 $set_key $zset_key]]
+ assert_equal {5} [r zunionstore zset_dest{t} 2 $set_key $zset_key]
+ assert_equal {1 2 3} [lsort [r zinter 2 $set_key $zset_key]]
+ assert_equal {3} [r zinterstore zset_dest{t} 2 $set_key $zset_key]
+ assert_equal {3} [r zintercard 2 $set_key $zset_key]
+
+                    # The result of ZDIFF does depend on the order of the keys.
+ if {$small_or_big == "small"} {
+ assert_equal {} [r zdiff 2 $set_key $zset_key]
+ assert_equal {0} [r zdiffstore zset_dest{t} 2 $set_key $zset_key]
+ } else {
+ assert_equal {4 5} [lsort [r zdiff 2 $set_key $zset_key]]
+ assert_equal {2} [r zdiffstore zset_dest{t} 2 $set_key $zset_key]
+ }
+ }
+ }
+ }
+
+ r config set set-max-intset-entries 512
+ r config set set-max-listpack-entries 128
+ r config set zset-max-listpack-entries 128
+ }
+
+ foreach type {single multiple single_multiple} {
+ test "ZADD overflows the maximum allowed elements in a listpack - $type" {
+ r del myzset
+
+ set max_entries 64
+ set original_max [lindex [r config get zset-max-listpack-entries] 1]
+ r config set zset-max-listpack-entries $max_entries
+
+ if {$type == "single"} {
+ # All are single zadd commands.
+ for {set i 0} {$i < $max_entries} {incr i} { r zadd myzset $i $i }
+ } elseif {$type == "multiple"} {
+ # One zadd command to add all elements.
+ set args {}
+ for {set i 0} {$i < $max_entries * 2} {incr i} { lappend args $i }
+ r zadd myzset {*}$args
+ } elseif {$type == "single_multiple"} {
+                # First a single zadd creates the key with one element, then
+                # one zadd adds all the elements.
+ r zadd myzset 1 1
+ set args {}
+ for {set i 0} {$i < $max_entries * 2} {incr i} { lappend args $i }
+ r zadd myzset {*}$args
+ }
+
+ assert_encoding listpack myzset
+ assert_equal $max_entries [r zcard myzset]
+ assert_equal 1 [r zadd myzset 1 b]
+ assert_encoding skiplist myzset
+
+ r config set zset-max-listpack-entries $original_max
+ }
+ }
+}
diff --git a/tests/unit/violations.tcl b/tests/unit/violations.tcl
new file mode 100644
index 0000000..783f306
--- /dev/null
+++ b/tests/unit/violations.tcl
@@ -0,0 +1,103 @@
+# One XADD with one huge 5GB field
+# Expected to fail, resulting in an empty stream
+run_solo {violations} {
+start_server [list overrides [list save ""] ] {
+ test {XADD one huge field} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
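+        # The limits are raised so a single multi-gigabyte bulk argument is
+        # not rejected by the query buffer itself; the "too large" error
+        # asserted below then comes from later size validation.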
+ r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
+ r write "\$1\r\nA\r\n"
+ catch {
+ write_big_bulk 5000000000 ;#5gb
+ } err
+ assert_match {*too large*} $err
+ r xlen S1
+ } {0} {large-memory}
+}
+
+# One XADD with one huge field of just under 4GB (4GB minus one byte)
+# This uncovers the overflow in lpEncodeGetType
+# Expected to fail, resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {XADD one huge field - 1} {
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
+ r write "*5\r\n\$4\r\nXADD\r\n\$2\r\nS1\r\n\$1\r\n*\r\n"
+ r write "\$1\r\nA\r\n"
+ catch {
+ write_big_bulk 4294967295 ;#4gb-1
+ } err
+ assert_match {*too large*} $err
+ r xlen S1
+ } {0} {large-memory}
+}
+
+# Gradually add big stream fields using repeated XADD calls
+start_server [list overrides [list save ""] ] {
+ test {several XADD big fields} {
+ r config set stream-node-max-bytes 0
+ for {set j 0} {$j<10} {incr j} {
+ r xadd stream * 1 $::str500 2 $::str500
+ }
+ r ping
+ r xlen stream
+ } {10} {large-memory}
+}
+
+# Add over 4GB to a single stream listpack (one XADD command)
+# Expected to fail resulting in an empty stream
+start_server [list overrides [list save ""] ] {
+ test {single XADD big fields} {
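+ # 23 arguments in total: XADD S * followed by ten field/value pairs,
+ # where each value is a 500mb bulk written by write_big_bulk (~5gb overall).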
+ r write "*23\r\n\$4\r\nXADD\r\n\$1\r\nS\r\n\$1\r\n*\r\n"
+ for {set j 0} {$j<10} {incr j} {
+ r write "\$1\r\n$j\r\n"
+ write_big_bulk 500000000 "" yes ;#500mb
+ }
+ r flush
+ catch {r read} err
+ assert_match {*too large*} $err
+ r xlen S
+ } {0} {large-memory}
+}
+
+# Gradually add big hash fields using repeated HSET calls
+# This reproduces the overflow in the call to ziplistResize
+# Object will be converted to hashtable encoding
+start_server [list overrides [list save ""] ] {
+ r config set hash-max-ziplist-value 1000000000 ;#1gb
+ test {hash with many big fields} {
+ for {set j 0} {$j<10} {incr j} {
+ r hset h $j $::str500
+ }
+ r object encoding h
+ } {hashtable} {large-memory}
+}
+
+# Add over 4GB to a single hash field (one HSET command)
+# Object will be converted to hashtable encoding
+start_server [list overrides [list save ""] ] {
+ test {hash with one huge field} {
+ catch {r config set hash-max-ziplist-value 10000000000} ;#10gb
+ r config set proto-max-bulk-len 10000000000 ;#10gb
+ r config set client-query-buffer-limit 10000000000 ;#10gb
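+ # The raw frame encodes the 4-argument "HSET H1 A <value>" command; here
+ # the 5gb value is expected to be accepted, leaving the hash in the
+ # hashtable encoding.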
+ r write "*4\r\n\$4\r\nHSET\r\n\$2\r\nH1\r\n"
+ r write "\$1\r\nA\r\n"
+ write_big_bulk 5000000000 ;#5gb
+ r object encoding H1
+ } {hashtable} {large-memory}
+}
+} ;# run_solo
+
+# SORT which stores an integer-encoded element into a list.
+# Just for coverage, nothing new here.
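+# (BY D* sorts the two set members by the values of D1 and D2, and GET S*
+# replaces each member with the value of the matching S<member> key before
+# the STORE into mylist)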
+start_server [list overrides [list save ""] ] {
+ test {SORT adds integer field to list} {
+ r set S1 asdf
+ r set S2 123 ;# integer encoded
+ assert_encoding "int" S2
+ r sadd myset 1 2
+ r mset D1 1 D2 2
+ r sort myset by D* get S* store mylist
+ r llen mylist
+ } {2} {cluster:skip}
+}
diff --git a/tests/unit/wait.tcl b/tests/unit/wait.tcl
new file mode 100644
index 0000000..bd0bced
--- /dev/null
+++ b/tests/unit/wait.tcl
@@ -0,0 +1,505 @@
+source tests/support/cli.tcl
+
+start_server {tags {"wait network external:skip"}} {
+start_server {} {
+ set slave [srv 0 client]
+ set slave_host [srv 0 host]
+ set slave_port [srv 0 port]
+ set slave_pid [srv 0 pid]
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+
+ test {Setup slave} {
+ $slave slaveof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+ }
+
+ test {WAIT out of range timeout (milliseconds)} {
+ # Timeout is parsed as milliseconds by getLongLongFromObjectOrReply().
+ # Verify we get an "out of range" error if the value exceeds LLONG_MAX
+ # (9223372036854775808 == 2^63 == 0x8000000000000000, i.e. LLONG_MAX+1).
+ assert_error "*or out of range*" {$master wait 2 9223372036854775808}
+
+ # Expected to fail due to the overflow check performed after mstime() is
+ # added to the timeout (9223372036854775807 == 0x7FFFFFFFFFFFFFFF, i.e.
+ # LLONG_MAX itself).
+ assert_error "*timeout is out of range*" {$master wait 2 9223372036854775807}
+
+ assert_error "*timeout is negative*" {$master wait 2 -1}
+ }
+
+ test {WAIT should acknowledge 1 additional copy of the data} {
+ $master set foo 0
+ $master incr foo
+ $master incr foo
+ $master incr foo
+ assert {[$master wait 1 5000] == 1}
+ assert {[$slave get foo] == 3}
+ }
+
+ test {WAIT should not acknowledge 2 additional copies of the data} {
+ $master incr foo
+ assert {[$master wait 2 1000] <= 1}
+ }
+
+ test {WAIT should not acknowledge 1 additional copy if slave is blocked} {
+ pause_process $slave_pid
+ $master set foo 0
+ $master incr foo
+ $master incr foo
+ $master incr foo
+ assert {[$master wait 1 1000] == 0}
+ resume_process $slave_pid
+ assert {[$master wait 1 1000] == 1}
+ }
+
+ test {WAIT implicitly blocks on client pause since ACKs aren't sent} {
+ pause_process $slave_pid
+ $master multi
+ $master incr foo
+ $master client pause 10000 write
+ $master exec
+ assert {[$master wait 1 1000] == 0}
+ $master client unpause
+ resume_process $slave_pid
+ assert {[$master wait 1 1000] == 1}
+ }
+
+ test {WAIT replica multiple clients unblock - reuse last result} {
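+ # Both clients block on WAIT for the same offset; once the replica acks,
+ # the server can satisfy the second waiter with the result computed for
+ # the first, which appears to be the "reuse last result" path under test.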
+ set rd [redis_deferring_client -1]
+ set rd2 [redis_deferring_client -1]
+
+ pause_process $slave_pid
+
+ $rd incr foo
+ $rd read
+
+ $rd2 incr foo
+ $rd2 read
+
+ $rd wait 1 0
+ $rd2 wait 1 0
+ wait_for_blocked_clients_count 2 100 10 -1
+
+ resume_process $slave_pid
+
+ assert_equal [$rd read] {1}
+ assert_equal [$rd2 read] {1}
+
+ $rd ping
+ assert_equal [$rd read] {PONG}
+ $rd2 ping
+ assert_equal [$rd2 read] {PONG}
+
+ $rd close
+ $rd2 close
+ }
+}}
+
+
+tags {"wait aof network external:skip"} {
+ start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
+ set master [srv 0 client]
+
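+ # WAITAOF <numlocal> <numreplicas> <timeout> replies with a two-element
+ # array: how many local and how many replica AOFs are known to have
+ # fsynced up to the client's last write offset.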
+ test {WAITAOF local copy before fsync} {
+ r config set appendfsync no
+ $master incr foo
+ assert_equal [$master waitaof 1 0 50] {0 0} ;# exits on timeout
+ r config set appendfsync everysec
+ }
+
+ test {WAITAOF local copy everysec} {
+ $master incr foo
+ assert_equal [$master waitaof 1 0 0] {1 0}
+ }
+
+ test {WAITAOF local copy with appendfsync always} {
+ r config set appendfsync always
+ $master incr foo
+ assert_equal [$master waitaof 1 0 0] {1 0}
+ r config set appendfsync everysec
+ }
+
+ test {WAITAOF local wait and then stop aof} {
+ set rd [redis_deferring_client]
+ $rd incr foo
+ $rd read
+ $rd waitaof 1 0 0
+ wait_for_blocked_client
+ r config set appendonly no ;# this should release the blocked client with an error
+ assert_error {ERR WAITAOF cannot be used when numlocal is set but appendonly is disabled.} {$rd read}
+ $rd close
+ }
+
+ test {WAITAOF local on server with aof disabled} {
+ $master incr foo
+ assert_error {ERR WAITAOF cannot be used when numlocal is set but appendonly is disabled.} {$master waitaof 1 0 0}
+ }
+
+ test {WAITAOF local if AOFRW was postponed} {
+ r config set appendfsync everysec
+
+ # turn off AOF
+ r config set appendonly no
+
+ # create an RDB child that takes a lot of time to run
+ r set x y
+ r config set rdb-key-save-delay 100000000 ;# 100 seconds
+ r bgsave
+ assert_equal [s rdb_bgsave_in_progress] 1
+
+ # turn on AOF
+ r config set appendonly yes
+ assert_equal [s aof_rewrite_scheduled] 1
+
+ # create a write command (to increment master_repl_offset)
+ r set x y
+
+ # reset save_delay and kill RDB child
+ r config set rdb-key-save-delay 0
+ catch {exec kill -9 [get_child_pid 0]}
+
+ # wait for AOF (will unblock after AOFRW finishes)
+ assert_equal [r waitaof 1 0 10000] {1 0}
+
+ # make sure AOFRW finished
+ assert_equal [s aof_rewrite_in_progress] 0
+ assert_equal [s aof_rewrite_scheduled] 0
+ }
+
+ $master config set appendonly yes
+ waitForBgrewriteaof $master
+
+ start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set replica [srv 0 client]
+ set replica_host [srv 0 host]
+ set replica_port [srv 0 port]
+ set replica_pid [srv 0 pid]
+
+ # make sure the master always fsyncs first (easier to test)
+ $master config set appendfsync always
+ $replica config set appendfsync no
+
+ test {WAITAOF on demoted master gets unblocked with an error} {
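+ # a server demoted to a replica can no longer satisfy a pending WAITAOF
+ # on replica AOFs, so the blocked client is released with an error instead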
+ set rd [redis_deferring_client]
+ $rd incr foo
+ $rd read
+ $rd waitaof 0 1 0
+ wait_for_blocked_client
+ $replica replicaof $master_host $master_port
+ assert_error {UNBLOCKED force unblock from blocking operation,*} {$rd read}
+ $rd close
+ }
+
+ wait_for_ofs_sync $master $replica
+
+ test {WAITAOF replica copy before fsync} {
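+ # the master fsyncs every write (appendfsync always) while the replica
+ # is still at appendfsync no, so only the local count can be satisfied
+ # before the 50ms timeout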
+ $master incr foo
+ assert_equal [$master waitaof 0 1 50] {1 0} ;# exits on timeout
+ }
+ $replica config set appendfsync everysec
+
+ test {WAITAOF replica copy everysec} {
+ $replica config set appendfsync everysec
+ waitForBgrewriteaof $replica ;# Make sure there is no AOFRW
+
+ $master incr foo
+ assert_equal [$master waitaof 0 1 0] {1 1}
+ }
+
+ test {WAITAOF replica copy everysec with AOFRW} {
+ $replica config set appendfsync everysec
+
+ # When we trigger an AOFRW, an fsync is performed when closing the old
+ # INCR file, so with everysec we skip the fsync for that second and
+ # eventually perform it during the following second.
+ $replica bgrewriteaof
+ waitForBgrewriteaof $replica
+
+ $master incr foo
+ assert_equal [$master waitaof 0 1 0] {1 1}
+ }
+
+ test {WAITAOF replica copy everysec with slow AOFRW} {
+ $replica config set appendfsync everysec
+ $replica config set rdb-key-save-delay 1000000 ;# 1 sec
+
+ $replica bgrewriteaof
+
+ $master incr foo
+ assert_equal [$master waitaof 0 1 0] {1 1}
+
+ $replica config set rdb-key-save-delay 0
+ waitForBgrewriteaof $replica
+ }
+
+ test {WAITAOF replica copy everysec->always with AOFRW} {
+ $replica config set appendfsync everysec
+
+ # Try to fit all of these within the same round second. There is no way to
+ # guarantee that, but it is likely on a fast machine. Either way, the test
+ # should not fail.
+ $replica bgrewriteaof
+ $master incr foo
+ waitForBgrewriteaof $replica
+ $replica config set appendfsync always
+
+ assert_equal [$master waitaof 0 1 0] {1 1}
+ }
+
+ test {WAITAOF replica copy appendfsync always} {
+ $replica config set appendfsync always
+ $master incr foo
+ assert_equal [$master waitaof 0 1 0] {1 1}
+ $replica config set appendfsync everysec
+ }
+
+ test {WAITAOF replica copy if replica is blocked} {
+ pause_process $replica_pid
+ $master incr foo
+ assert_equal [$master waitaof 0 1 50] {1 0} ;# exits on timeout
+ resume_process $replica_pid
+ assert_equal [$master waitaof 0 1 0] {1 1}
+ }
+
+ test {WAITAOF replica multiple clients unblock - reuse last result} {
+ set rd [redis_deferring_client -1]
+ set rd2 [redis_deferring_client -1]
+
+ pause_process $replica_pid
+
+ $rd incr foo
+ $rd read
+
+ $rd2 incr foo
+ $rd2 read
+
+ $rd waitaof 0 1 0
+ $rd2 waitaof 0 1 0
+ wait_for_blocked_clients_count 2 100 10 -1
+
+ resume_process $replica_pid
+
+ assert_equal [$rd read] {1 1}
+ assert_equal [$rd2 read] {1 1}
+
+ $rd ping
+ assert_equal [$rd read] {PONG}
+ $rd2 ping
+ assert_equal [$rd2 read] {PONG}
+
+ $rd close
+ $rd2 close
+ }
+
+ test {WAITAOF on promoted replica} {
+ $replica replicaof no one
+ $replica incr foo
+ assert_equal [$replica waitaof 1 0 0] {1 0}
+ }
+
+ test {WAITAOF master that loses a replica and backlog is dropped} {
+ $master config set repl-backlog-ttl 1
+ after 2000 ;# wait for backlog to expire
+ $master incr foo
+ assert_equal [$master waitaof 1 0 0] {1 0}
+ }
+
+ test {WAITAOF master without backlog, wait is released when the replica finishes full-sync} {
+ set rd [redis_deferring_client -1]
+ $rd incr foo
+ $rd read
+ $rd waitaof 0 1 0
+ wait_for_blocked_client -1
+ $replica replicaof $master_host $master_port
+ assert_equal [$rd read] {1 1}
+ $rd close
+ }
+
+ test {WAITAOF master isn't configured to do AOF} {
+ $master config set appendonly no
+ $master incr foo
+ assert_equal [$master waitaof 0 1 0] {0 1}
+ }
+
+ test {WAITAOF replica isn't configured to do AOF} {
+ $master config set appendonly yes
+ waitForBgrewriteaof $master
+ $replica config set appendonly no
+ $master incr foo
+ assert_equal [$master waitaof 1 0 0] {1 0}
+ }
+
+ test {WAITAOF both local and replica got AOF enabled at runtime} {
+ $replica config set appendonly yes
+ waitForBgrewriteaof $replica
+ $master incr foo
+ assert_equal [$master waitaof 1 1 0] {1 1}
+ }
+
+ test {WAITAOF master sends PING after last write} {
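+ # the periodic replication PING keeps the replication stream (and hence
+ # the replicas' AOFs) advancing past the client's last write, so WAITAOF
+ # can be satisfied without issuing further writes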
+ $master config set repl-ping-replica-period 1
+ $master incr foo
+ after 1200 ;# wait for PING
+ $master get foo
+ assert_equal [$master waitaof 1 1 0] {1 1}
+ $master config set repl-ping-replica-period 10
+ }
+
+ test {WAITAOF master client didn't send any write command} {
+ $master config set repl-ping-replica-period 1
+ set client [redis_client -1]
+ after 1200 ;# wait for PING
+ assert_equal [$master waitaof 1 1 0] {1 1}
+ $client close
+ $master config set repl-ping-replica-period 10
+ }
+
+ test {WAITAOF master client didn't send any command} {
+ $master config set repl-ping-replica-period 1
+ set client [redis [srv -1 "host"] [srv -1 "port"] 0 $::tls]
+ after 1200 ;# wait for PING
+ assert_equal [$master waitaof 1 1 0] {1 1}
+ $client close
+ $master config set repl-ping-replica-period 10
+ }
+
+ foreach fsync {no everysec always} {
+ test "WAITAOF when replica switches between masters, fsync: $fsync" {
+ # Test a case where a replica is moved from one master to the other,
+ # between two replication streams with different offsets that must not be
+ # mixed. This smoke-tests race conditions with the bio thread.
+ start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
+ start_server {overrides {appendonly {yes} auto-aof-rewrite-percentage {0}}} {
+ set master2 [srv -1 client]
+ set master2_host [srv -1 host]
+ set master2_port [srv -1 port]
+ set replica2 [srv 0 client]
+ set replica2_host [srv 0 host]
+ set replica2_port [srv 0 port]
+ set replica2_pid [srv 0 pid]
+
+ $replica2 replicaof $master2_host $master2_port
+ wait_for_ofs_sync $master2 $replica2
+
+ $master config set appendfsync $fsync
+ $master2 config set appendfsync $fsync
+ $replica config set appendfsync $fsync
+ $replica2 config set appendfsync $fsync
+ if {$fsync eq "no"} {
+ after 2000 ;# wait for any previous fsync to finish
+ # we can't afford appendfsync "no" on the masters
+ $master config set appendfsync always
+ $master2 config set appendfsync always
+ } elseif {$fsync eq "everysec"} {
+ after 990 ;# hoping to hit a race
+ }
+
+ # add some writes and block a client on each master
+ set rd [redis_deferring_client -3]
+ set rd2 [redis_deferring_client -1]
+ $rd set boo 11
+ $rd2 set boo 22
+ $rd read
+ $rd2 read
+ $rd waitaof 1 1 0
+ $rd2 waitaof 1 1 0
+
+ if {$fsync eq "no"} {
+ # since appendfsync is "no" on the replicas, the clients
+ # will only get released by the full sync
+ wait_for_blocked_client -1
+ wait_for_blocked_client -3
+ }
+ # switch between the two replicas
+ $replica2 replicaof $master_host $master_port
+ $replica replicaof $master2_host $master2_port
+ assert_equal [$rd read] {1 1}
+ assert_equal [$rd2 read] {1 1}
+ $rd close
+ $rd2 close
+
+ assert_equal [$replica get boo] 22
+ assert_equal [$replica2 get boo] 11
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+start_server {tags {"failover external:skip"}} {
+start_server {} {
+start_server {} {
+ set master [srv 0 client]
+ set master_host [srv 0 host]
+ set master_port [srv 0 port]
+
+ set replica1 [srv -1 client]
+ set replica1_pid [srv -1 pid]
+
+ set replica2 [srv -2 client]
+
+ test {setup replication for following tests} {
+ $replica1 replicaof $master_host $master_port
+ $replica2 replicaof $master_host $master_port
+ wait_for_sync $replica1
+ wait_for_sync $replica2
+ }
+
+ test {WAIT and WAITAOF replica multiple clients unblock - reuse last result} {
+ set rd [redis_deferring_client]
+ set rd2 [redis_deferring_client]
+
+ $master config set appendonly yes
+ $replica1 config set appendonly yes
+ $replica2 config set appendonly yes
+
+ $master config set appendfsync always
+ $replica1 config set appendfsync no
+ $replica2 config set appendfsync no
+
+ waitForBgrewriteaof $master
+ waitForBgrewriteaof $replica1
+ waitForBgrewriteaof $replica2
+
+ pause_process $replica1_pid
+
+ $rd incr foo
+ $rd read
+ $rd waitaof 0 1 0
+
+ # rd2 has a newer repl_offset
+ $rd2 incr foo
+ $rd2 read
+ $rd2 wait 2 0
+
+ wait_for_blocked_clients_count 2
+
+ resume_process $replica1_pid
+
+ # WAIT will unblock the client first.
+ assert_equal [$rd2 read] {2}
+
+ # Make $replica1 catch up on repl_aof_off, after which WAITAOF will unblock the client.
+ $replica1 config set appendfsync always
+ $master incr foo
+ assert_equal [$rd read] {1 1}
+
+ $rd ping
+ assert_equal [$rd read] {PONG}
+ $rd2 ping
+ assert_equal [$rd2 read] {PONG}
+
+ $rd close
+ $rd2 close
+ }
+}
+}
+}