author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-14 13:40:54 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-14 13:40:54 +0000
commit    317c0644ccf108aa23ef3fd8358bd66c2840bfc0 (patch)
tree      c417b3d25c86b775989cb5ac042f37611b626c8a /utils
parent    Initial commit. (diff)
Adding upstream version 5:7.2.4. (tag: upstream/5%7.2.4)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'utils')
-rw-r--r-- utils/build-static-symbols.tcl | 22
-rw-r--r-- utils/cluster_fail_time.tcl | 50
-rw-r--r-- utils/corrupt_rdb.c | 45
-rw-r--r-- utils/create-cluster/.gitignore | 6
-rw-r--r-- utils/create-cluster/README | 27
-rwxr-xr-x utils/create-cluster/create-cluster | 125
-rwxr-xr-x utils/gen-test-certs.sh | 58
-rwxr-xr-x utils/generate-command-code.py | 623
-rwxr-xr-x utils/generate-commands-json.py | 136
-rwxr-xr-x utils/generate-module-api-doc.rb | 205
-rw-r--r-- utils/graphs/commits-over-time/README.md | 16
-rwxr-xr-x utils/graphs/commits-over-time/genhtml.tcl | 96
-rw-r--r-- utils/hyperloglog/.gitignore | 1
-rw-r--r-- utils/hyperloglog/hll-err.rb | 27
-rw-r--r-- utils/hyperloglog/hll-gnuplot-graph.rb | 88
-rwxr-xr-x utils/install_server.sh | 291
-rw-r--r-- utils/lru/README | 19
-rw-r--r-- utils/lru/lfu-simulation.c | 158
-rw-r--r-- utils/lru/test-lru.rb | 223
-rw-r--r-- utils/redis-copy.rb | 35
-rw-r--r-- utils/redis-sha1.rb | 52
-rwxr-xr-x utils/redis_init_script | 50
-rwxr-xr-x utils/redis_init_script.tpl | 44
-rwxr-xr-x utils/releasetools/01_create_tarball.sh | 14
-rwxr-xr-x utils/releasetools/02_upload_tarball.sh | 23
-rwxr-xr-x utils/releasetools/03_test_release.sh | 28
-rwxr-xr-x utils/releasetools/04_release_hash.sh | 13
-rwxr-xr-x utils/releasetools/changelog.tcl | 35
-rw-r--r-- utils/reply_schema_linter.js | 31
-rwxr-xr-x utils/req-res-log-validator.py | 350
-rw-r--r-- utils/req-res-validator/requirements.txt | 2
-rwxr-xr-x utils/speed-regression.tcl | 130
-rw-r--r-- utils/srandmember/README.md | 14
-rw-r--r-- utils/srandmember/showdist.rb | 33
-rw-r--r-- utils/srandmember/showfreq.rb | 23
-rw-r--r-- utils/systemd-redis_multiple_servers@.service | 37
-rw-r--r-- utils/systemd-redis_server.service | 43
-rw-r--r-- utils/tracking_collisions.c | 76
-rwxr-xr-x utils/whatisdoing.sh | 24
39 files changed, 3273 insertions(+), 0 deletions(-)
diff --git a/utils/build-static-symbols.tcl b/utils/build-static-symbols.tcl
new file mode 100644
index 0000000..e634cbe
--- /dev/null
+++ b/utils/build-static-symbols.tcl
@@ -0,0 +1,22 @@
+# Build a symbol table for static symbols of redis.c
+# Useful to get stack traces on segfault without a debugger. See redis.c
+# for more information.
+#
+# Copyright (C) 2009 Salvatore Sanfilippo, under the BSD license.
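+#
+# Example (must be run from the directory containing redis.c, since the
+# file is opened with a relative path; the output file name is just an
+# illustration):
+#
+#   tclsh ../utils/build-static-symbols.tcl > symbols.h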
+
+set fd [open redis.c]
+set symlist {}
+while {[gets $fd line] != -1} {
+ if {[regexp {^static +[A-z0-9]+[ *]+([A-z0-9]*)\(} $line - sym]} {
+ lappend symlist $sym
+ }
+}
+set symlist [lsort -unique $symlist]
+puts "static struct redisFunctionSym symsTable\[\] = {"
+foreach sym $symlist {
+ puts "{\"$sym\",(unsigned long)$sym},"
+}
+puts "{NULL,0}"
+puts "};"
+
+close $fd
diff --git a/utils/cluster_fail_time.tcl b/utils/cluster_fail_time.tcl
new file mode 100644
index 0000000..8739949
--- /dev/null
+++ b/utils/cluster_fail_time.tcl
@@ -0,0 +1,50 @@
+# This simple script is used in order to estimate the average time needed
+# for the PFAIL->FAIL state switch after a failure.
+
+set ::sleep_time 10 ; # How much to sleep to trigger PFAIL.
+set ::fail_port 30016 ; # Node to put to sleep.
+set ::other_port 30001 ; # Node to use to monitor the flag switch.
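+
+# Example (assumes a running Redis Cluster whose nodes listen on the ports
+# configured above, for instance one created with utils/create-cluster and
+# enough nodes, and redis-cli available in the PATH):
+#
+#   tclsh utils/cluster_fail_time.tcl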
+
+proc avg vector {
+ set sum 0.0
+ foreach x $vector {
+ set sum [expr {$sum+$x}]
+ }
+ expr {$sum/[llength $vector]}
+}
+
+set samples {}
+while 1 {
+ exec redis-cli -p $::fail_port debug sleep $::sleep_time > /dev/null &
+
+ # Wait for fail? to appear.
+ while 1 {
+ set output [exec redis-cli -p $::other_port cluster nodes]
+ if {[string match {*fail\?*} $output]} break
+ after 100
+ }
+
+ puts "FAIL?"
+ set start [clock milliseconds]
+
+ # Wait for fail? to disappear.
+ while 1 {
+ set output [exec redis-cli -p $::other_port cluster nodes]
+ if {![string match {*fail\?*} $output]} break
+ after 100
+ }
+
+ puts "FAIL"
+ set now [clock milliseconds]
+ set elapsed [expr {$now-$start}]
+ puts $elapsed
+ lappend samples $elapsed
+
+ puts "AVG([llength $samples]): [avg $samples]"
+
+ # Wait for the instance to be available again.
+ exec redis-cli -p $::fail_port ping
+
+ # Wait for the fail flag to be cleared.
+ after 2000
+}
diff --git a/utils/corrupt_rdb.c b/utils/corrupt_rdb.c
new file mode 100644
index 0000000..df9c93e
--- /dev/null
+++ b/utils/corrupt_rdb.c
@@ -0,0 +1,45 @@
+/* Trivial program to corrupt an RDB file in order to check the behavior
+ * and effectiveness of the RDB check program.
+ *
+ * Copyright (C) 2016 Salvatore Sanfilippo.
+ * This software is released under the 3-clause BSD license. */
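+
+/* Example invocation (the file name and the number of corruption cycles
+ * below are just an illustration):
+ *
+ *   ./corrupt_rdb dump.rdb 10
+ */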
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <time.h>
+
+int main(int argc, char **argv) {
+ struct stat stat;
+ int fd, cycles;
+
+ if (argc != 3) {
+ fprintf(stderr,"Usage: <filename> <cycles>\n");
+ exit(1);
+ }
+
+ srand(time(NULL));
+ char *filename = argv[1];
+ cycles = atoi(argv[2]);
+ fd = open(filename,O_RDWR);
+ if (fd == -1) {
+ perror("open");
+ exit(1);
+ }
+ fstat(fd,&stat);
+
+ while(cycles--) {
+ unsigned char buf[32];
+ unsigned long offset = rand()%stat.st_size;
+ int writelen = 1+rand()%31;
+ int j;
+
+ for (j = 0; j < writelen; j++) buf[j] = (char)rand();
+ lseek(fd,offset,SEEK_SET);
+ printf("Writing %d bytes at offset %lu\n", writelen, offset);
+ write(fd,buf,writelen);
+ }
+ return 0;
+}
diff --git a/utils/create-cluster/.gitignore b/utils/create-cluster/.gitignore
new file mode 100644
index 0000000..a34b639
--- /dev/null
+++ b/utils/create-cluster/.gitignore
@@ -0,0 +1,6 @@
+config.sh
+*.rdb
+*.aof
+*.conf
+*.log
+appendonlydir-*
diff --git a/utils/create-cluster/README b/utils/create-cluster/README
new file mode 100644
index 0000000..bcd7459
--- /dev/null
+++ b/utils/create-cluster/README
@@ -0,0 +1,27 @@
+create-cluster is a small script used to easily start a large number of Redis
+instances configured to run in cluster mode. Its main goal is to allow manual
+testing in conditions which are not easy to replicate with the Redis cluster
+unit tests, for example when a lot of instances are needed in order to trigger
+a given bug.
+
+The tool can also be used just to easily create a number of instances in a
+Redis Cluster in order to experiment a bit with the system.
+
+USAGE
+---
+
+To create a cluster, follow these steps:
+
+1. Edit create-cluster and change the start / end port, depending on the
+number of instances you want to create.
+2. Use "./create-cluster start" in order to run the instances.
+3. Use "./create-cluster create" in order to execute redis-cli --cluster create, so that
+an actual Redis cluster will be created. (If you're accessing your setup via a local container, ensure that the CLUSTER_HOST value is changed to your local IP)
+4. Now you are ready to play with the cluster. AOF files and logs for each instance are created in the current directory.
+
+In order to stop a cluster:
+
+1. Use "./create-cluster stop" to stop all the instances. Once the instances are stopped, you can use "./create-cluster start" to restart them if you change your mind.
+2. Use "./create-cluster clean" to remove all the AOF / log files to restart with a clean environment.
+
+Use the command "./create-cluster help" to get the full list of features.
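+
+For example, a typical session with the default settings (PORT=30000 and
+NODES=6, i.e. instances listening on ports 30001-30006) could look like:
+
+    ./create-cluster start
+    ./create-cluster create
+    ./create-cluster stop
+    ./create-cluster clean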
diff --git a/utils/create-cluster/create-cluster b/utils/create-cluster/create-cluster
new file mode 100755
index 0000000..d97ee2b
--- /dev/null
+++ b/utils/create-cluster/create-cluster
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+# Settings
+BIN_PATH="$SCRIPT_DIR/../../src/"
+CLUSTER_HOST=127.0.0.1
+PORT=30000
+TIMEOUT=2000
+NODES=6
+REPLICAS=1
+PROTECTED_MODE=yes
+ADDITIONAL_OPTIONS=""
+
+# You may want to put the above config parameters into config.sh in order to
+# override the defaults without modifying this script.
+
+if [ -a config.sh ]
+then
+ source "config.sh"
+fi
+
+# Computed vars
+ENDPORT=$((PORT+NODES))
+
+if [ "$1" == "start" ]
+then
+ while [ $((PORT < ENDPORT)) != "0" ]; do
+ PORT=$((PORT+1))
+ echo "Starting $PORT"
+ $BIN_PATH/redis-server --port $PORT --protected-mode $PROTECTED_MODE --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --appenddirname appendonlydir-${PORT} --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes ${ADDITIONAL_OPTIONS}
+ done
+ exit 0
+fi
+
+if [ "$1" == "create" ]
+then
+ HOSTS=""
+ while [ $((PORT < ENDPORT)) != "0" ]; do
+ PORT=$((PORT+1))
+ HOSTS="$HOSTS $CLUSTER_HOST:$PORT"
+ done
+ OPT_ARG=""
+ if [ "$2" == "-f" ]; then
+ OPT_ARG="--cluster-yes"
+ fi
+ $BIN_PATH/redis-cli --cluster create $HOSTS --cluster-replicas $REPLICAS $OPT_ARG
+ exit 0
+fi
+
+if [ "$1" == "stop" ]
+then
+ while [ $((PORT < ENDPORT)) != "0" ]; do
+ PORT=$((PORT+1))
+ echo "Stopping $PORT"
+ $BIN_PATH/redis-cli -p $PORT shutdown nosave
+ done
+ exit 0
+fi
+
+if [ "$1" == "watch" ]
+then
+ PORT=$((PORT+1))
+ while [ 1 ]; do
+ clear
+ date
+ $BIN_PATH/redis-cli -p $PORT cluster nodes | head -30
+ sleep 1
+ done
+ exit 0
+fi
+
+if [ "$1" == "tail" ]
+then
+ INSTANCE=$2
+ PORT=$((PORT+INSTANCE))
+ tail -f ${PORT}.log
+ exit 0
+fi
+
+if [ "$1" == "tailall" ]
+then
+ tail -f *.log
+ exit 0
+fi
+
+if [ "$1" == "call" ]
+then
+ while [ $((PORT < ENDPORT)) != "0" ]; do
+ PORT=$((PORT+1))
+ $BIN_PATH/redis-cli -p $PORT $2 $3 $4 $5 $6 $7 $8 $9
+ done
+ exit 0
+fi
+
+if [ "$1" == "clean" ]
+then
+ echo "Cleaning *.log"
+ rm -rf *.log
+ echo "Cleaning appendonlydir-*"
+ rm -rf appendonlydir-*
+ echo "Cleaning dump-*.rdb"
+ rm -rf dump-*.rdb
+ echo "Cleaning nodes-*.conf"
+ rm -rf nodes-*.conf
+ exit 0
+fi
+
+if [ "$1" == "clean-logs" ]
+then
+ echo "Cleaning *.log"
+ rm -rf *.log
+ exit 0
+fi
+
+echo "Usage: $0 [start|create|stop|watch|tail|tailall|clean|clean-logs|call]"
+echo "start -- Launch Redis Cluster instances."
+echo "create [-f] -- Create a cluster using redis-cli --cluster create."
+echo "stop -- Stop Redis Cluster instances."
+echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node."
+echo "tail <id> -- Run tail -f of instance at base port + ID."
+echo "tailall -- Run tail -f for all the log files at once."
+echo "clean       -- Remove all instance data, logs, configs."
+echo "clean-logs  -- Remove just the instance logs."
+echo "call <cmd> -- Call a command (up to 7 arguments) on all nodes."
diff --git a/utils/gen-test-certs.sh b/utils/gen-test-certs.sh
new file mode 100755
index 0000000..6bc9d86
--- /dev/null
+++ b/utils/gen-test-certs.sh
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+# Generate some test certificates which are used by the regression test suite:
+#
+# tests/tls/ca.{crt,key} Self signed CA certificate.
+# tests/tls/redis.{crt,key} A certificate with no key usage/policy restrictions.
+# tests/tls/client.{crt,key} A certificate restricted for SSL client usage.
+# tests/tls/server.{crt,key} A certificate restricted for SSL server usage.
+# tests/tls/redis.dh DH Params file.
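+#
+# Example: after running the script, one way to inspect a generated
+# certificate (the exact invocation is just an illustration):
+#
+#   openssl x509 -in tests/tls/redis.crt -noout -subject -dates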
+
+generate_cert() {
+ local name=$1
+ local cn="$2"
+ local opts="$3"
+
+ local keyfile=tests/tls/${name}.key
+ local certfile=tests/tls/${name}.crt
+
+ [ -f $keyfile ] || openssl genrsa -out $keyfile 2048
+ openssl req \
+ -new -sha256 \
+ -subj "/O=Redis Test/CN=$cn" \
+ -key $keyfile | \
+ openssl x509 \
+ -req -sha256 \
+ -CA tests/tls/ca.crt \
+ -CAkey tests/tls/ca.key \
+ -CAserial tests/tls/ca.txt \
+ -CAcreateserial \
+ -days 365 \
+ $opts \
+ -out $certfile
+}
+
+mkdir -p tests/tls
+[ -f tests/tls/ca.key ] || openssl genrsa -out tests/tls/ca.key 4096
+openssl req \
+ -x509 -new -nodes -sha256 \
+ -key tests/tls/ca.key \
+ -days 3650 \
+ -subj '/O=Redis Test/CN=Certificate Authority' \
+ -out tests/tls/ca.crt
+
+cat > tests/tls/openssl.cnf <<_END_
+[ server_cert ]
+keyUsage = digitalSignature, keyEncipherment
+nsCertType = server
+
+[ client_cert ]
+keyUsage = digitalSignature, keyEncipherment
+nsCertType = client
+_END_
+
+generate_cert server "Server-only" "-extfile tests/tls/openssl.cnf -extensions server_cert"
+generate_cert client "Client-only" "-extfile tests/tls/openssl.cnf -extensions client_cert"
+generate_cert redis "Generic-cert"
+
+[ -f tests/tls/redis.dh ] || openssl dhparam -out tests/tls/redis.dh 2048
diff --git a/utils/generate-command-code.py b/utils/generate-command-code.py
new file mode 100755
index 0000000..dc66ce8
--- /dev/null
+++ b/utils/generate-command-code.py
@@ -0,0 +1,623 @@
+#!/usr/bin/env python3
+import glob
+import json
+import os
+import argparse
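+
+# Usage sketch (assumes the script lives in utils/ inside a Redis source
+# tree; it reads src/commands/*.json and writes src/commands.def, or
+# src/commands_with_reply_schema.def when --with-reply-schema is given):
+#
+#   utils/generate-command-code.py [--with-reply-schema]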
+
+ARG_TYPES = {
+ "string": "ARG_TYPE_STRING",
+ "integer": "ARG_TYPE_INTEGER",
+ "double": "ARG_TYPE_DOUBLE",
+ "key": "ARG_TYPE_KEY",
+ "pattern": "ARG_TYPE_PATTERN",
+ "unix-time": "ARG_TYPE_UNIX_TIME",
+ "pure-token": "ARG_TYPE_PURE_TOKEN",
+ "oneof": "ARG_TYPE_ONEOF",
+ "block": "ARG_TYPE_BLOCK",
+}
+
+GROUPS = {
+ "generic": "COMMAND_GROUP_GENERIC",
+ "string": "COMMAND_GROUP_STRING",
+ "list": "COMMAND_GROUP_LIST",
+ "set": "COMMAND_GROUP_SET",
+ "sorted_set": "COMMAND_GROUP_SORTED_SET",
+ "hash": "COMMAND_GROUP_HASH",
+ "pubsub": "COMMAND_GROUP_PUBSUB",
+ "transactions": "COMMAND_GROUP_TRANSACTIONS",
+ "connection": "COMMAND_GROUP_CONNECTION",
+ "server": "COMMAND_GROUP_SERVER",
+ "scripting": "COMMAND_GROUP_SCRIPTING",
+ "hyperloglog": "COMMAND_GROUP_HYPERLOGLOG",
+ "cluster": "COMMAND_GROUP_CLUSTER",
+ "sentinel": "COMMAND_GROUP_SENTINEL",
+ "geo": "COMMAND_GROUP_GEO",
+ "stream": "COMMAND_GROUP_STREAM",
+ "bitmap": "COMMAND_GROUP_BITMAP",
+}
+
+
+def get_optional_desc_string(desc, field, force_uppercase=False):
+ v = desc.get(field, None)
+ if v and force_uppercase:
+ v = v.upper()
+ ret = "\"%s\"" % v if v else "NULL"
+ return ret.replace("\n", "\\n")
+
+
+def check_command_args_key_specs(args, command_key_specs_index_set, command_arg_key_specs_index_set):
+ if not args:
+ return True
+
+ for arg in args:
+ if arg.key_spec_index is not None:
+ assert isinstance(arg.key_spec_index, int)
+
+ if arg.key_spec_index not in command_key_specs_index_set:
+ print("command: %s arg: %s key_spec_index error" % (command.fullname(), arg.name))
+ return False
+
+ command_arg_key_specs_index_set.add(arg.key_spec_index)
+
+ if not check_command_args_key_specs(arg.subargs, command_key_specs_index_set, command_arg_key_specs_index_set):
+ return False
+
+ return True
+
+def check_command_key_specs(command):
+ if not command.key_specs:
+ return True
+
+ assert isinstance(command.key_specs, list)
+
+ for cmd_key_spec in command.key_specs:
+ if "flags" not in cmd_key_spec:
+ print("command: %s key_specs missing flags" % command.fullname())
+ return False
+
+ if "NOT_KEY" in cmd_key_spec["flags"]:
+ # Like SUNSUBSCRIBE / SPUBLISH / SSUBSCRIBE
+ return True
+
+ command_key_specs_index_set = set(range(len(command.key_specs)))
+ command_arg_key_specs_index_set = set()
+
+ # Collect key_spec used for each arg, including arg.subarg
+ if not check_command_args_key_specs(command.args, command_key_specs_index_set, command_arg_key_specs_index_set):
+ return False
+
+ # Check if we have key_specs not used
+ if command_key_specs_index_set != command_arg_key_specs_index_set:
+ print("command: %s may have unused key_spec" % command.fullname())
+ return False
+
+ return True
+
+
+# Globals
+subcommands = {} # container_name -> dict(subcommand_name -> Subcommand) - Only subcommands
+commands = {} # command_name -> Command - Only commands
+
+
+class KeySpec(object):
+ def __init__(self, spec):
+ self.spec = spec
+
+ def struct_code(self):
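+        """
+        Output example (a sketch of what this produces for a hypothetical
+        spec with flags=["RW"], an index-based begin_search with pos=1 and
+        a range-based find_keys with lastkey=0, step=1, limit=0):
+
+        NULL,CMD_KEY_RW,KSPEC_BS_INDEX,.bs.index={1},KSPEC_FK_RANGE,.fk.range={0,1,0}
+        """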
+ def _flags_code():
+ s = ""
+ for flag in self.spec.get("flags", []):
+ s += "CMD_KEY_%s|" % flag
+ return s[:-1] if s else 0
+
+ def _begin_search_code():
+ if self.spec["begin_search"].get("index"):
+ return "KSPEC_BS_INDEX,.bs.index={%d}" % (
+ self.spec["begin_search"]["index"]["pos"]
+ )
+ elif self.spec["begin_search"].get("keyword"):
+ return "KSPEC_BS_KEYWORD,.bs.keyword={\"%s\",%d}" % (
+ self.spec["begin_search"]["keyword"]["keyword"],
+ self.spec["begin_search"]["keyword"]["startfrom"],
+ )
+ elif "unknown" in self.spec["begin_search"]:
+ return "KSPEC_BS_UNKNOWN,{{0}}"
+ else:
+ print("Invalid begin_search! value=%s" % self.spec["begin_search"])
+ exit(1)
+
+ def _find_keys_code():
+ if self.spec["find_keys"].get("range"):
+ return "KSPEC_FK_RANGE,.fk.range={%d,%d,%d}" % (
+ self.spec["find_keys"]["range"]["lastkey"],
+ self.spec["find_keys"]["range"]["step"],
+ self.spec["find_keys"]["range"]["limit"]
+ )
+ elif self.spec["find_keys"].get("keynum"):
+ return "KSPEC_FK_KEYNUM,.fk.keynum={%d,%d,%d}" % (
+ self.spec["find_keys"]["keynum"]["keynumidx"],
+ self.spec["find_keys"]["keynum"]["firstkey"],
+ self.spec["find_keys"]["keynum"]["step"]
+ )
+ elif "unknown" in self.spec["find_keys"]:
+ return "KSPEC_FK_UNKNOWN,{{0}}"
+ else:
+ print("Invalid find_keys! value=%s" % self.spec["find_keys"])
+ exit(1)
+
+ return "%s,%s,%s,%s" % (
+ get_optional_desc_string(self.spec, "notes"),
+ _flags_code(),
+ _begin_search_code(),
+ _find_keys_code()
+ )
+
+
+def verify_no_dup_names(container_fullname, args):
+ name_list = [arg.name for arg in args]
+ name_set = set(name_list)
+ if len(name_list) != len(name_set):
+ print("{}: Dup argument names: {}".format(container_fullname, name_list))
+ exit(1)
+
+
+class Argument(object):
+ def __init__(self, parent_name, desc):
+ self.parent_name = parent_name
+ self.desc = desc
+ self.name = self.desc["name"].lower()
+ if "_" in self.name:
+ print("{}: name ({}) should not contain underscores".format(self.fullname(), self.name))
+ exit(1)
+ self.type = self.desc["type"]
+ self.key_spec_index = self.desc.get("key_spec_index", None)
+ self.subargs = []
+ if self.type in ["oneof", "block"]:
+ self.display = None
+ for subdesc in self.desc["arguments"]:
+ self.subargs.append(Argument(self.fullname(), subdesc))
+ if len(self.subargs) < 2:
+ print("{}: oneof or block arg contains less than two subargs".format(self.fullname()))
+ exit(1)
+ verify_no_dup_names(self.fullname(), self.subargs)
+ else:
+ self.display = self.desc.get("display")
+
+ def fullname(self):
+ return ("%s %s" % (self.parent_name, self.name)).replace("-", "_")
+
+ def struct_name(self):
+ return "%s_Arg" % (self.fullname().replace(" ", "_"))
+
+ def subarg_table_name(self):
+ assert self.subargs
+ return "%s_Subargs" % (self.fullname().replace(" ", "_"))
+
+ def struct_code(self):
+ """
+ Output example:
+ MAKE_ARG("expiration",ARG_TYPE_ONEOF,-1,NULL,NULL,NULL,CMD_ARG_OPTIONAL,5,NULL),.subargs=GETEX_expiration_Subargs
+ """
+
+ def _flags_code():
+ s = ""
+ if self.desc.get("optional", False):
+ s += "CMD_ARG_OPTIONAL|"
+ if self.desc.get("multiple", False):
+ s += "CMD_ARG_MULTIPLE|"
+ if self.desc.get("multiple_token", False):
+ assert self.desc.get("multiple", False) # Sanity
+ s += "CMD_ARG_MULTIPLE_TOKEN|"
+ return s[:-1] if s else "CMD_ARG_NONE"
+
+ s = "MAKE_ARG(\"%s\",%s,%d,%s,%s,%s,%s,%d,%s)" % (
+ self.name,
+ ARG_TYPES[self.type],
+ self.desc.get("key_spec_index", -1),
+ get_optional_desc_string(self.desc, "token", force_uppercase=True),
+ get_optional_desc_string(self.desc, "summary"),
+ get_optional_desc_string(self.desc, "since"),
+ _flags_code(),
+ len(self.subargs),
+ get_optional_desc_string(self.desc, "deprecated_since"),
+ )
+ if "display" in self.desc:
+ s += ",.display_text=\"%s\"" % self.desc["display"].lower()
+ if self.subargs:
+ s += ",.subargs=%s" % self.subarg_table_name()
+
+ return s
+
+ def write_internal_structs(self, f):
+ if self.subargs:
+ for subarg in self.subargs:
+ subarg.write_internal_structs(f)
+
+ f.write("/* %s argument table */\n" % self.fullname())
+ f.write("struct COMMAND_ARG %s[] = {\n" % self.subarg_table_name())
+ for subarg in self.subargs:
+ f.write("{%s},\n" % subarg.struct_code())
+ f.write("};\n\n")
+
+
+def to_c_name(str):
+ return str.replace(":", "").replace(".", "_").replace("$", "_")\
+ .replace("^", "_").replace("*", "_").replace("-", "_")
+
+
+class ReplySchema(object):
+ def __init__(self, name, desc):
+ self.name = to_c_name(name)
+ self.schema = {}
+ if desc.get("type") == "object":
+ if desc.get("properties") and desc.get("additionalProperties") is None:
+ print("%s: Any object that has properties should have the additionalProperties field" % self.name)
+ exit(1)
+ elif desc.get("type") == "array":
+ if desc.get("items") and isinstance(desc["items"], list) and any([desc.get(k) is None for k in ["minItems", "maxItems"]]):
+ print("%s: Any array that has items should have the minItems and maxItems fields" % self.name)
+ exit(1)
+ for k, v in desc.items():
+ if isinstance(v, dict):
+ self.schema[k] = ReplySchema("%s_%s" % (self.name, k), v)
+ elif isinstance(v, list):
+ self.schema[k] = []
+ for i, subdesc in enumerate(v):
+ self.schema[k].append(ReplySchema("%s_%s_%i" % (self.name, k,i), subdesc))
+ else:
+ self.schema[k] = v
+
+ def write(self, f):
+ def struct_code(name, k, v):
+ if isinstance(v, ReplySchema):
+ t = "JSON_TYPE_OBJECT"
+ vstr = ".value.object=&%s" % name
+ elif isinstance(v, list):
+ t = "JSON_TYPE_ARRAY"
+ vstr = ".value.array={.objects=%s,.length=%d}" % (name, len(v))
+ elif isinstance(v, bool):
+ t = "JSON_TYPE_BOOLEAN"
+ vstr = ".value.boolean=%d" % int(v)
+ elif isinstance(v, str):
+ t = "JSON_TYPE_STRING"
+ vstr = ".value.string=\"%s\"" % v
+ elif isinstance(v, int):
+ t = "JSON_TYPE_INTEGER"
+ vstr = ".value.integer=%d" % v
+
+ return "%s,\"%s\",%s" % (t, k, vstr)
+
+ for k, v in self.schema.items():
+ if isinstance(v, ReplySchema):
+ v.write(f)
+ elif isinstance(v, list):
+ for i, schema in enumerate(v):
+ schema.write(f)
+ name = to_c_name("%s_%s" % (self.name, k))
+ f.write("/* %s array reply schema */\n" % name)
+ f.write("struct jsonObject *%s[] = {\n" % name)
+ for i, schema in enumerate(v):
+ f.write("&%s,\n" % schema.name)
+ f.write("};\n\n")
+
+ f.write("/* %s reply schema */\n" % self.name)
+ f.write("struct jsonObjectElement %s_elements[] = {\n" % self.name)
+ for k, v in self.schema.items():
+ name = to_c_name("%s_%s" % (self.name, k))
+ f.write("{%s},\n" % struct_code(name, k, v))
+ f.write("};\n\n")
+ f.write("struct jsonObject %s = {%s_elements,.length=%d};\n\n" % (self.name, self.name, len(self.schema)))
+
+
+class Command(object):
+ def __init__(self, name, desc):
+ self.name = name.upper()
+ self.desc = desc
+ self.group = self.desc["group"]
+ self.key_specs = self.desc.get("key_specs", [])
+ self.subcommands = []
+ self.args = []
+ for arg_desc in self.desc.get("arguments", []):
+ self.args.append(Argument(self.fullname(), arg_desc))
+ verify_no_dup_names(self.fullname(), self.args)
+ self.reply_schema = None
+ if "reply_schema" in self.desc:
+ self.reply_schema = ReplySchema(self.reply_schema_name(), self.desc["reply_schema"])
+
+ def fullname(self):
+ return self.name.replace("-", "_").replace(":", "")
+
+ def return_types_table_name(self):
+ return "%s_ReturnInfo" % self.fullname().replace(" ", "_")
+
+ def subcommand_table_name(self):
+ assert self.subcommands
+ return "%s_Subcommands" % self.name
+
+ def history_table_name(self):
+ return "%s_History" % (self.fullname().replace(" ", "_"))
+
+ def tips_table_name(self):
+ return "%s_Tips" % (self.fullname().replace(" ", "_"))
+
+ def arg_table_name(self):
+ return "%s_Args" % (self.fullname().replace(" ", "_"))
+
+ def key_specs_table_name(self):
+ return "%s_Keyspecs" % (self.fullname().replace(" ", "_"))
+
+ def reply_schema_name(self):
+ return "%s_ReplySchema" % (self.fullname().replace(" ", "_"))
+
+ def struct_name(self):
+ return "%s_Command" % (self.fullname().replace(" ", "_"))
+
+ def history_code(self):
+ if not self.desc.get("history"):
+ return ""
+ s = ""
+ for tupl in self.desc["history"]:
+ s += "{\"%s\",\"%s\"},\n" % (tupl[0], tupl[1])
+ return s
+
+ def num_history(self):
+ if not self.desc.get("history"):
+ return 0
+ return len(self.desc["history"])
+
+ def tips_code(self):
+ if not self.desc.get("command_tips"):
+ return ""
+ s = ""
+ for hint in self.desc["command_tips"]:
+ s += "\"%s\",\n" % hint.lower()
+ return s
+
+ def num_tips(self):
+ if not self.desc.get("command_tips"):
+ return 0
+ return len(self.desc["command_tips"])
+
+ def key_specs_code(self):
+ s = ""
+ for spec in self.key_specs:
+ s += "{%s}," % KeySpec(spec).struct_code()
+ return s[:-1]
+
+
+ def struct_code(self):
+ """
+ Output example:
+ MAKE_CMD("set","Set the string value of a key","O(1)","1.0.0",CMD_DOC_NONE,NULL,NULL,"string",COMMAND_GROUP_STRING,SET_History,4,SET_Tips,0,setCommand,-3,CMD_WRITE|CMD_DENYOOM,ACL_CATEGORY_STRING,SET_Keyspecs,1,setGetKeys,5),.args=SET_Args
+ """
+
+ def _flags_code():
+ s = ""
+ for flag in self.desc.get("command_flags", []):
+ s += "CMD_%s|" % flag
+ return s[:-1] if s else 0
+
+ def _acl_categories_code():
+ s = ""
+ for cat in self.desc.get("acl_categories", []):
+ s += "ACL_CATEGORY_%s|" % cat
+ return s[:-1] if s else 0
+
+ def _doc_flags_code():
+ s = ""
+ for flag in self.desc.get("doc_flags", []):
+ s += "CMD_DOC_%s|" % flag
+ return s[:-1] if s else "CMD_DOC_NONE"
+
+ s = "MAKE_CMD(\"%s\",%s,%s,%s,%s,%s,%s,%s,%s,%s,%d,%s,%d,%s,%d,%s,%s,%s,%d,%s,%d)," % (
+ self.name.lower(),
+ get_optional_desc_string(self.desc, "summary"),
+ get_optional_desc_string(self.desc, "complexity"),
+ get_optional_desc_string(self.desc, "since"),
+ _doc_flags_code(),
+ get_optional_desc_string(self.desc, "replaced_by"),
+ get_optional_desc_string(self.desc, "deprecated_since"),
+ "\"%s\"" % self.group,
+ GROUPS[self.group],
+ self.history_table_name(),
+ self.num_history(),
+ self.tips_table_name(),
+ self.num_tips(),
+ self.desc.get("function", "NULL"),
+ self.desc["arity"],
+ _flags_code(),
+ _acl_categories_code(),
+ self.key_specs_table_name(),
+ len(self.key_specs),
+ self.desc.get("get_keys_function", "NULL"),
+ len(self.args),
+ )
+
+ if self.subcommands:
+ s += ".subcommands=%s," % self.subcommand_table_name()
+
+ if self.args:
+ s += ".args=%s," % self.arg_table_name()
+
+ if self.reply_schema and args.with_reply_schema:
+ s += ".reply_schema=&%s," % self.reply_schema_name()
+
+ return s[:-1]
+
+ def write_internal_structs(self, f):
+ if self.subcommands:
+ subcommand_list = sorted(self.subcommands, key=lambda cmd: cmd.name)
+ for subcommand in subcommand_list:
+ subcommand.write_internal_structs(f)
+
+ f.write("/* %s command table */\n" % self.fullname())
+ f.write("struct COMMAND_STRUCT %s[] = {\n" % self.subcommand_table_name())
+ for subcommand in subcommand_list:
+ f.write("{%s},\n" % subcommand.struct_code())
+ f.write("{0}\n")
+ f.write("};\n\n")
+
+ f.write("/********** %s ********************/\n\n" % self.fullname())
+
+ f.write("#ifndef SKIP_CMD_HISTORY_TABLE\n")
+ f.write("/* %s history */\n" % self.fullname())
+ code = self.history_code()
+ if code:
+ f.write("commandHistory %s[] = {\n" % self.history_table_name())
+ f.write("%s" % code)
+ f.write("};\n")
+ else:
+ f.write("#define %s NULL\n" % self.history_table_name())
+ f.write("#endif\n\n")
+
+ f.write("#ifndef SKIP_CMD_TIPS_TABLE\n")
+ f.write("/* %s tips */\n" % self.fullname())
+ code = self.tips_code()
+ if code:
+ f.write("const char *%s[] = {\n" % self.tips_table_name())
+ f.write("%s" % code)
+ f.write("};\n")
+ else:
+ f.write("#define %s NULL\n" % self.tips_table_name())
+ f.write("#endif\n\n")
+
+ f.write("#ifndef SKIP_CMD_KEY_SPECS_TABLE\n")
+ f.write("/* %s key specs */\n" % self.fullname())
+ code = self.key_specs_code()
+ if code:
+ f.write("keySpec %s[%d] = {\n" % (self.key_specs_table_name(), len(self.key_specs)))
+ f.write("%s\n" % code)
+ f.write("};\n")
+ else:
+ f.write("#define %s NULL\n" % self.key_specs_table_name())
+ f.write("#endif\n\n")
+
+ if self.args:
+ for arg in self.args:
+ arg.write_internal_structs(f)
+
+ f.write("/* %s argument table */\n" % self.fullname())
+ f.write("struct COMMAND_ARG %s[] = {\n" % self.arg_table_name())
+ for arg in self.args:
+ f.write("{%s},\n" % arg.struct_code())
+ f.write("};\n\n")
+
+ if self.reply_schema and args.with_reply_schema:
+ self.reply_schema.write(f)
+
+
+class Subcommand(Command):
+ def __init__(self, name, desc):
+ self.container_name = desc["container"].upper()
+ super(Subcommand, self).__init__(name, desc)
+
+ def fullname(self):
+ return "%s %s" % (self.container_name, self.name.replace("-", "_").replace(":", ""))
+
+
+def create_command(name, desc):
+ if desc.get("container"):
+ cmd = Subcommand(name.upper(), desc)
+ subcommands.setdefault(desc["container"].upper(), {})[name] = cmd
+ else:
+ cmd = Command(name.upper(), desc)
+ commands[name.upper()] = cmd
+
+
+# MAIN
+
+# Figure out where the sources are
+srcdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../src")
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--with-reply-schema', action='store_true')
+args = parser.parse_args()
+
+# Create all command objects
+print("Processing json files...")
+for filename in glob.glob('%s/commands/*.json' % srcdir):
+ with open(filename, "r") as f:
+ try:
+ d = json.load(f)
+ for name, desc in d.items():
+ create_command(name, desc)
+ except json.decoder.JSONDecodeError as err:
+ print("Error processing %s: %s" % (filename, err))
+ exit(1)
+
+# Link subcommands to containers
+print("Linking container command to subcommands...")
+for command in commands.values():
+ assert command.group
+ if command.name not in subcommands:
+ continue
+ for subcommand in subcommands[command.name].values():
+ assert not subcommand.group or subcommand.group == command.group
+ subcommand.group = command.group
+ command.subcommands.append(subcommand)
+
+check_command_error_counter = 0 # Counts the errors detected while checking the commands.
+
+print("Checking all commands...")
+for command in commands.values():
+ if not check_command_key_specs(command):
+ check_command_error_counter += 1
+
+if check_command_error_counter != 0:
+ print("Error: There are errors in the commands check, please check the above logs.")
+ exit(1)
+
+commands_filename = "commands_with_reply_schema" if args.with_reply_schema else "commands"
+print("Generating %s.def..." % commands_filename)
+with open("%s/%s.def" % (srcdir, commands_filename), "w") as f:
+ f.write("/* Automatically generated by %s, do not edit. */\n\n" % os.path.basename(__file__))
+ f.write(
+"""
+/* We have fabulous commands from
+ * the fantastic
+ * Redis Command Table! */
+
+/* Must match redisCommandGroup */
+const char *COMMAND_GROUP_STR[] = {
+ "generic",
+ "string",
+ "list",
+ "set",
+ "sorted-set",
+ "hash",
+ "pubsub",
+ "transactions",
+ "connection",
+ "server",
+ "scripting",
+ "hyperloglog",
+ "cluster",
+ "sentinel",
+ "geo",
+ "stream",
+ "bitmap",
+ "module"
+};
+
+const char *commandGroupStr(int index) {
+ return COMMAND_GROUP_STR[index];
+}
+"""
+ )
+
+ command_list = sorted(commands.values(), key=lambda cmd: (cmd.group, cmd.name))
+ for command in command_list:
+ command.write_internal_structs(f)
+
+ f.write("/* Main command table */\n")
+ f.write("struct COMMAND_STRUCT redisCommandTable[] = {\n")
+ curr_group = None
+ for command in command_list:
+ if curr_group != command.group:
+ curr_group = command.group
+ f.write("/* %s */\n" % curr_group)
+ f.write("{%s},\n" % command.struct_code())
+ f.write("{0}\n")
+ f.write("};\n")
+
+print("All done, exiting.")
diff --git a/utils/generate-commands-json.py b/utils/generate-commands-json.py
new file mode 100755
index 0000000..23782ea
--- /dev/null
+++ b/utils/generate-commands-json.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+import argparse
+import json
+import os
+import subprocess
+from collections import OrderedDict
+from sys import argv
+
+
+def convert_flags_to_boolean_dict(flags):
+ """Return a dict with a key set to `True` per element in the flags list."""
+ return {f: True for f in flags}
+
+
+def set_if_not_none_or_empty(dst, key, value):
+ """Set 'key' in 'dst' if 'value' is not `None` or an empty list."""
+ if value is not None and (type(value) is not list or len(value)):
+ dst[key] = value
+
+
+def convert_argument(arg):
+ """Transform an argument."""
+ arg.update(convert_flags_to_boolean_dict(arg.pop('flags', [])))
+ set_if_not_none_or_empty(arg, 'arguments',
+ [convert_argument(x) for x in arg.pop('arguments', [])])
+ return arg
+
+
+def convert_keyspec(spec):
+ """Transform a key spec."""
+ spec.update(convert_flags_to_boolean_dict(spec.pop('flags', [])))
+ return spec
+
+
+def convert_entry_to_objects_array(cmd, docs):
+ """Transform the JSON output of `COMMAND` to a friendlier format.
+
+ cmd is the output of `COMMAND` as follows:
+ 1. Name (lower case, e.g. "lolwut")
+ 2. Arity
+ 3. Flags
+ 4-6. First/last/step key specification (deprecated as of Redis v7.0)
+ 7. ACL categories
+ 8. hints (as of Redis 7.0)
+ 9. key-specs (as of Redis 7.0)
+ 10. subcommands (as of Redis 7.0)
+
+ docs is the output of `COMMAND DOCS`, which holds a map of additional metadata
+
+    This returns a list with a dict for the command and one for each of its
+    subcommands. Each dict contains a single key, the command's full name,
+    whose value is a dict populated with the command's properties and meta
+    information."""
+ assert len(cmd) >= 9
+ obj = {}
+ rep = [obj]
+ name = cmd[0].upper()
+ arity = cmd[1]
+ command_flags = cmd[2]
+ acl_categories = cmd[6]
+ hints = cmd[7]
+ keyspecs = cmd[8]
+ subcommands = cmd[9] if len(cmd) > 9 else []
+ key = name.replace('|', ' ')
+
+ subcommand_docs = docs.pop('subcommands', [])
+ rep.extend([convert_entry_to_objects_array(x, subcommand_docs[x[0]])[0] for x in subcommands])
+
+ # The command's value is ordered so the interesting stuff that we care about
+ # is at the start. Optional `None` and empty list values are filtered out.
+ value = OrderedDict()
+ value['summary'] = docs.pop('summary')
+ value['since'] = docs.pop('since')
+ value['group'] = docs.pop('group')
+ set_if_not_none_or_empty(value, 'complexity', docs.pop('complexity', None))
+ set_if_not_none_or_empty(value, 'deprecated_since', docs.pop('deprecated_since', None))
+ set_if_not_none_or_empty(value, 'replaced_by', docs.pop('replaced_by', None))
+ set_if_not_none_or_empty(value, 'history', docs.pop('history', []))
+ set_if_not_none_or_empty(value, 'acl_categories', acl_categories)
+ value['arity'] = arity
+ set_if_not_none_or_empty(value, 'key_specs',
+ [convert_keyspec(x) for x in keyspecs])
+ set_if_not_none_or_empty(value, 'arguments',
+ [convert_argument(x) for x in docs.pop('arguments', [])])
+ set_if_not_none_or_empty(value, 'command_flags', command_flags)
+ set_if_not_none_or_empty(value, 'doc_flags', docs.pop('doc_flags', []))
+ set_if_not_none_or_empty(value, 'hints', hints)
+
+ # All remaining docs key-value tuples, if any, are appended to the command
+ # to be future-proof.
+ while len(docs) > 0:
+ (k, v) = docs.popitem()
+ value[k] = v
+
+ obj[key] = value
+ return rep
+
+
+# Figure out where the sources are
+srcdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../src")
+
+# MAIN
+if __name__ == '__main__':
+ opts = {
+ 'description': 'Transform the output from `redis-cli --json` using COMMAND and COMMAND DOCS to a single commands.json format.',
+ 'epilog': f'Usage example: {argv[0]} --cli src/redis-cli --port 6379 > commands.json'
+ }
+ parser = argparse.ArgumentParser(**opts)
+ parser.add_argument('--host', type=str, default='localhost')
+ parser.add_argument('--port', type=int, default=6379)
+ parser.add_argument('--cli', type=str, default='%s/redis-cli' % srcdir)
+ args = parser.parse_args()
+
+ payload = OrderedDict()
+ cmds = []
+
+ p = subprocess.Popen([args.cli, '-h', args.host, '-p', str(args.port), '--json', 'command'], stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ commands = json.loads(stdout)
+
+ p = subprocess.Popen([args.cli, '-h', args.host, '-p', str(args.port), '--json', 'command', 'docs'],
+ stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate()
+ docs = json.loads(stdout)
+
+ for entry in commands:
+ cmd = convert_entry_to_objects_array(entry, docs[entry[0]])
+ cmds.extend(cmd)
+
+ # The final output is a dict of all commands, ordered by name.
+ cmds.sort(key=lambda x: list(x.keys())[0])
+ for cmd in cmds:
+ name = list(cmd.keys())[0]
+ payload[name] = cmd[name]
+
+ print(json.dumps(payload, indent=4))
diff --git a/utils/generate-module-api-doc.rb b/utils/generate-module-api-doc.rb
new file mode 100755
index 0000000..8829434
--- /dev/null
+++ b/utils/generate-module-api-doc.rb
@@ -0,0 +1,205 @@
+#!/usr/bin/env ruby
+# coding: utf-8
+# generate-module-api-doc.rb -- Converts the top-comments inside module.c to
+# modules API reference documentation in markdown format.
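+#
+# Usage sketch (reads ../src/module.c relative to this script and prints
+# markdown to stdout; the output file name below is just an example):
+#
+#   ruby utils/generate-module-api-doc.rb > modules-api-ref.md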
+
+# Convert the C comment to markdown
+def markdown(s)
+ s = s.gsub(/\*\/$/,"")
+ s = s.gsub(/^ ?\* ?/,"")
+ s = s.gsub(/^\/\*\*? ?/,"")
+ s.chop! while s[-1] == "\n" || s[-1] == " "
+ lines = s.split("\n")
+ newlines = []
+ # Fix some markdown
+ lines.each{|l|
+ # Rewrite RM_Xyz() to RedisModule_Xyz().
+ l = l.gsub(/(?<![A-Z_])RM_(?=[A-Z])/, 'RedisModule_')
+ # Fix more markdown, except in code blocks indented by 4 spaces, which we
+ # don't want to mess with.
+ if not l.start_with?(' ')
+ # Add backquotes around RedisModule functions and type where missing.
+ l = l.gsub(/(?<!`)RedisModule[A-z]+(?:\*?\(\))?/){|x| "`#{x}`"}
+ # Add backquotes around c functions like malloc() where missing.
+ l = l.gsub(/(?<![`A-z.])[a-z_]+\(\)/, '`\0`')
+ # Add backquotes around macro and var names containing underscores.
+ l = l.gsub(/(?<![`A-z\*])[A-Za-z]+_[A-Za-z0-9_]+/){|x| "`#{x}`"}
+ # Link URLs preceded by space or newline (not already linked)
+ l = l.gsub(/(^| )(https?:\/\/[A-Za-z0-9_\/\.\-]+[A-Za-z0-9\/])/,
+ '\1[\2](\2)')
+ # Replace double-dash with unicode ndash
+ l = l.gsub(/ -- /, ' – ')
+ end
+ # Link function names to their definition within the page
+ l = l.gsub(/`(RedisModule_[A-z0-9]+)[()]*`/) {|x|
+ $index[$1] ? "[#{x}](\##{$1})" : x
+ }
+ newlines << l
+ }
+ return newlines.join("\n")
+end
+
+# Linebreak a prototype longer than 80 characters on the commas, but only
+# between balanced parentheses so that we don't linebreak args which are
+# function pointers, and then aligning each arg under each other.
+def linebreak_proto(proto, indent)
+ if proto.bytesize <= 80
+ return proto
+ end
+ parts = proto.split(/,\s*/);
+ if parts.length == 1
+ return proto;
+ end
+ align_pos = proto.index("(") + 1;
+ align = " " * align_pos
+ result = parts.shift;
+ bracket_balance = 0;
+ parts.each{|part|
+ if bracket_balance == 0
+ result += ",\n" + indent + align
+ else
+ result += ", "
+ end
+ result += part
+ bracket_balance += part.count("(") - part.count(")")
+ }
+ return result;
+end
+
+# Given the source code array and the index at which an exported symbol was
+# detected, extracts and outputs the documentation.
+def docufy(src,i)
+ m = /RM_[A-z0-9]+/.match(src[i])
+ name = m[0]
+ name = name.sub("RM_","RedisModule_")
+ proto = src[i].sub("{","").strip+";\n"
+ proto = proto.sub("RM_","RedisModule_")
+ proto = linebreak_proto(proto, " ");
+ # Add a link target with the function name. (We don't trust the exact id of
+ # the generated one, which depends on the Markdown implementation.)
+ puts "<span id=\"#{name}\"></span>\n\n"
+ puts "### `#{name}`\n\n"
+ puts " #{proto}\n"
+ puts "**Available since:** #{$since[name] or "unreleased"}\n\n"
+ comment = ""
+ while true
+ i = i-1
+ comment = src[i]+comment
+ break if src[i] =~ /\/\*/
+ end
+ comment = markdown(comment)
+ puts comment+"\n\n"
+end
+
+# Print a comment from line until */ is found, as markdown.
+def section_doc(src, i)
+ name = get_section_heading(src, i)
+ comment = "<span id=\"#{section_name_to_id(name)}\"></span>\n\n"
+ while true
+ # append line, except if it's a horizontal divider
+ comment = comment + src[i] if src[i] !~ /^[\/ ]?\*{1,2} ?-{50,}/
+ break if src[i] =~ /\*\//
+ i = i+1
+ end
+ comment = markdown(comment)
+ puts comment+"\n\n"
+end
+
+# generates an id suitable for links within the page
+def section_name_to_id(name)
+ return "section-" +
+ name.strip.downcase.gsub(/[^a-z0-9]+/, '-').gsub(/^-+|-+$/, '')
+end
+
+# Returns the name of the first section heading in the comment block for which
+# is_section_doc(src, i) is true
+def get_section_heading(src, i)
+ if src[i] =~ /^\/\*\*? \#+ *(.*)/
+ heading = $1
+ elsif src[i+1] =~ /^ ?\* \#+ *(.*)/
+ heading = $1
+ end
+ return heading.gsub(' -- ', ' – ')
+end
+
+# Returns true if the line is the start of a generic documentation section. Such
+# section must start with the # symbol, i.e. a markdown heading, on the first or
+# the second line.
+def is_section_doc(src, i)
+ return src[i] =~ /^\/\*\*? \#/ ||
+ (src[i] =~ /^\/\*/ && src[i+1] =~ /^ ?\* \#/)
+end
+
+def is_func_line(src, i)
+ line = src[i]
+ return line =~ /RM_/ &&
+ line[0] != ' ' && line[0] != '#' && line[0] != '/' &&
+ src[i-1] =~ /\*\//
+end
+
+puts "---\n"
+puts "title: \"Modules API reference\"\n"
+puts "linkTitle: \"API reference\"\n"
+puts "weight: 1\n"
+puts "description: >\n"
+puts " Reference for the Redis Modules API\n"
+puts "aliases:\n"
+puts " - /topics/modules-api-ref\n"
+puts "---\n"
+puts "\n"
+puts "<!-- This file is generated from module.c using\n"
+puts " utils/generate-module-api-doc.rb -->\n\n"
+src = File.open(File.dirname(__FILE__) ++ "/../src/module.c").to_a
+
+# Build function index
+$index = {}
+src.each_with_index do |line,i|
+ if is_func_line(src, i)
+ line =~ /RM_([A-z0-9]+)/
+ name = "RedisModule_#{$1}"
+ $index[name] = true
+ end
+end
+
+# Populate the 'since' map (name => version) if we're in a git repo.
+$since = {}
+git_dir = File.dirname(__FILE__) ++ "/../.git"
+if File.directory?(git_dir) && `which git` != ""
+ `git --git-dir="#{git_dir}" tag --sort=v:refname`.each_line do |version|
+ next if version !~ /^(\d+)\.\d+\.\d+?$/ || $1.to_i < 4
+ version.chomp!
+ `git --git-dir="#{git_dir}" cat-file blob "#{version}:src/module.c"`.each_line do |line|
+ if line =~ /^\w.*[ \*]RM_([A-z0-9]+)/
+ name = "RedisModule_#{$1}"
+ if ! $since[name]
+ $since[name] = version
+ end
+ end
+ end
+ end
+end
+
+# Print TOC
+puts "## Sections\n\n"
+src.each_with_index do |_line,i|
+ if is_section_doc(src, i)
+ name = get_section_heading(src, i)
+ puts "* [#{name}](\##{section_name_to_id(name)})\n"
+ end
+end
+puts "* [Function index](#section-function-index)\n\n"
+
+# Docufy: Print function prototype and markdown docs
+src.each_with_index do |_line,i|
+ if is_func_line(src, i)
+ docufy(src, i)
+ elsif is_section_doc(src, i)
+ section_doc(src, i)
+ end
+end
+
+# Print function index
+puts "<span id=\"section-function-index\"></span>\n\n"
+puts "## Function index\n\n"
+$index.keys.sort.each{|x| puts "* [`#{x}`](\##{x})\n"}
+puts "\n"
diff --git a/utils/graphs/commits-over-time/README.md b/utils/graphs/commits-over-time/README.md
new file mode 100644
index 0000000..b28019e
--- /dev/null
+++ b/utils/graphs/commits-over-time/README.md
@@ -0,0 +1,16 @@
+This Tcl script is what I used in order to generate the graph you
+can find at http://antirez.com/news/98. It's really quick & dirty, more
+a throw-away program than anything else, but it could probably be reused
+or modified in the future in order to visualize other similar data or an
+updated version of the same data.
+
+The usage is trivial:
+
+ ./genhtml.tcl > output.html
+
+The generated HTML is quite broken but good enough to grab a screenshot
+from the browser. Feel free to improve it if you have the time / interest.
+
+Note that, because of the code filtering the tags and the hardcoded branch
+name, the script as it is cannot analyze a different repository. However,
+the changes needed are trivial.
diff --git a/utils/graphs/commits-over-time/genhtml.tcl b/utils/graphs/commits-over-time/genhtml.tcl
new file mode 100755
index 0000000..c4b4e09
--- /dev/null
+++ b/utils/graphs/commits-over-time/genhtml.tcl
@@ -0,0 +1,96 @@
+#!/usr/bin/env tclsh
+
+# Load commits history as "sha1 unixtime".
+set commits [exec git log unstable {--pretty="%H %at"}]
+set raw_tags [exec git tag]
+
+# Load all the tags that are about stable releases.
+foreach tag $raw_tags {
+ if {[string match v*-stable $tag]} {
+ set tag [string range $tag 1 end-7]
+ puts $tag
+ }
+ if {[regexp {^[0-9]+.[0-9]+.[0-9]+$} $tag]} {
+ lappend tags $tag
+ }
+}
+
+# For each tag, create a list of "name unixtime"
+foreach tag $tags {
+ set taginfo [exec git log $tag -n 1 "--pretty=\"$tag %at\""]
+ set taginfo [string trim $taginfo {"}]
+ lappend labels $taginfo
+}
+
+# For each commit, check the amount of code changed and create an array
+# mapping the commit to the number of lines affected.
+foreach c $commits {
+ set stat [exec git show --oneline --numstat [lindex $c 0]]
+ set linenum 0
+ set affected 0
+ foreach line [split $stat "\n"] {
+ incr linenum
+ if {$linenum == 1 || [string match *deps/* $line]} continue
+ if {[catch {llength $line} numfields]} continue
+ if {$numfields == 0} continue
+ catch {
+ incr affected [lindex $line 0]
+ incr affected [lindex $line 1]
+ }
+ }
+ set commit_to_affected([lindex $c 0]) $affected
+}
+
+set base_time [lindex [lindex $commits end] 1]
+puts [clock format $base_time]
+
+# Generate a graph made of HTML DIVs.
+puts {<html>
+<style>
+.box {
+ position:absolute;
+ width:10px;
+ height:5px;
+ border:1px black solid;
+ background-color:#44aa33;
+ opacity: 0.04;
+}
+.label {
+ position:absolute;
+ background-color:#dddddd;
+ font-family:helvetica;
+ font-size:12px;
+ padding:2px;
+ color:#666;
+ border:1px #aaa solid;
+ border-radius: 5px;
+}
+#outer {
+ position:relative;
+ width:1500;
+ height:500;
+ border:1px #aaa solid;
+}
+</style>
+<div id="outer">
+}
+foreach c $commits {
+ set sha [lindex $c 0]
+ set t [expr {([lindex $c 1]-$base_time)/(3600*24*2)}]
+ set affected [expr $commit_to_affected($sha)]
+ set left $t
+ set height [expr {log($affected)*20}]
+ puts "<div class=\"box\" style=\"left:$left; bottom:0; height:$height\"></div>"
+}
+
+set bottom -30
+foreach l $labels {
+ set name [lindex $l 0]
+ set t [expr {([lindex $l 1]-$base_time)/(3600*24*2)}]
+ set left $t
+ if {$left < 0} continue
+ incr bottom -20
+ if {$bottom == -210} {set bottom -30}
+ puts "<div class=\"label\" style=\"left:$left; bottom:$bottom\">$name</div>"
+}
+puts {</div></html>}
diff --git a/utils/hyperloglog/.gitignore b/utils/hyperloglog/.gitignore
new file mode 100644
index 0000000..2211df6
--- /dev/null
+++ b/utils/hyperloglog/.gitignore
@@ -0,0 +1 @@
+*.txt
diff --git a/utils/hyperloglog/hll-err.rb b/utils/hyperloglog/hll-err.rb
new file mode 100644
index 0000000..2c71ac5
--- /dev/null
+++ b/utils/hyperloglog/hll-err.rb
@@ -0,0 +1,27 @@
+# hll-err.rb - Copyright (C) 2014 Salvatore Sanfilippo
+# BSD license, See the COPYING file for more information.
+#
+# Check the error of the Redis HyperLogLog implementation for different set sizes.
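+#
+# Example (assumes a Redis server listening on localhost:6379 and the
+# 'redis' gem installed; the script runs forever, printing the relative
+# error as the set grows):
+#
+#   ruby utils/hyperloglog/hll-err.rb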
+
+require 'rubygems'
+require 'redis'
+require 'digest/sha1'
+
+r = Redis.new
+r.del('hll')
+i = 0
+while true do
+ 100.times {
+ elements = []
+ 1000.times {
+ ele = Digest::SHA1.hexdigest(i.to_s)
+ elements << ele
+ i += 1
+ }
+ r.pfadd('hll',elements)
+ }
+ approx = r.pfcount('hll')
+ abs_err = (approx-i).abs
+ rel_err = 100.to_f*abs_err/i
+ puts "#{i} vs #{approx}: #{rel_err}%"
+end
diff --git a/utils/hyperloglog/hll-gnuplot-graph.rb b/utils/hyperloglog/hll-gnuplot-graph.rb
new file mode 100644
index 0000000..6c7596d
--- /dev/null
+++ b/utils/hyperloglog/hll-gnuplot-graph.rb
@@ -0,0 +1,88 @@
+# hll-gnuplot-graph.rb - Copyright (C) 2014 Salvatore Sanfilippo
+# BSD license, See the COPYING file for more information.
+#
+# This program outputs the average and maximum error of the Redis
+# HyperLogLog implementation, in a format suitable for plotting
+# graphs using gnuplot.
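+#
+# Example (the arguments are: number of sets, max cardinality, step and
+# filter, as in the usage message at the bottom of this file; the output
+# redirection is just an illustration):
+#
+#   ruby utils/hyperloglog/hll-gnuplot-graph.rb 10 100000 1000 max > max.txt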
+
+require 'rubygems'
+require 'redis'
+require 'digest/sha1'
+
+# Generate an array of [cardinality,relative_error] pairs
+# in the 0 - max range, with the specified step.
+#
+# 'r' is the Redis object used to perform the queries.
+# 'seed' must be different every time you want a test performed
+# with a different set. The function guarantees that if 'seed' is the
+# same, exactly the same dataset is used, and when it is different,
+# a totally unrelated data set is used (with no common
+# elements in practice).
+def run_experiment(r,seed,max,step)
+ r.del('hll')
+ i = 0
+ samples = []
+ step = 1000 if step > 1000
+ while i < max do
+ elements = []
+ step.times {
+ ele = Digest::SHA1.hexdigest(i.to_s+seed.to_s)
+ elements << ele
+ i += 1
+ }
+ r.pfadd('hll',elements)
+ approx = r.pfcount('hll')
+ err = approx-i
+ rel_err = 100.to_f*err/i
+ samples << [i,rel_err]
+ end
+ samples
+end
+
+def filter_samples(numsets,max,step,filter)
+ r = Redis.new
+ dataset = {}
+ (0...numsets).each{|i|
+ dataset[i] = run_experiment(r,i,max,step)
+ STDERR.puts "Set #{i}"
+ }
+ dataset[0].each_with_index{|ele,index|
+ if filter == :max
+ card=ele[0]
+ err=ele[1].abs
+ (1...numsets).each{|i|
+ err = dataset[i][index][1] if err < dataset[i][index][1]
+ }
+ puts "#{card} #{err}"
+ elsif filter == :avg
+ card=ele[0]
+ err = 0
+ (0...numsets).each{|i|
+ err += dataset[i][index][1]
+ }
+ err /= numsets
+ puts "#{card} #{err}"
+ elsif filter == :absavg
+ card=ele[0]
+ err = 0
+ (0...numsets).each{|i|
+ err += dataset[i][index][1].abs
+ }
+ err /= numsets
+ puts "#{card} #{err}"
+ elsif filter == :all
+ (0...numsets).each{|i|
+ card,err = dataset[i][index]
+ puts "#{card} #{err}"
+ }
+ else
+ raise "Unknown filter #{filter}"
+ end
+ }
+end
+
+if ARGV.length != 4
+ puts "Usage: hll-gnuplot-graph <samples> <max> <step> (max|avg|absavg|all)"
+ exit 1
+end
+filter_samples(ARGV[0].to_i,ARGV[1].to_i,ARGV[2].to_i,ARGV[3].to_sym)
diff --git a/utils/install_server.sh b/utils/install_server.sh
new file mode 100755
index 0000000..efda7da
--- /dev/null
+++ b/utils/install_server.sh
@@ -0,0 +1,291 @@
+#!/bin/sh
+
+# Copyright 2011 Dvir Volk <dvirsk at gmail dot com>. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+# EVENT SHALL Dvir Volk OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+################################################################################
+#
+# Service installer for redis server, runs interactively by default.
+#
+# To run this script non-interactively (for automation/provisioning purposes),
+# feed the variables into the script. You will be prompted for any missing variables!
+# Tip: Environment variables also support command substitution (see REDIS_EXECUTABLE)
+#
+# Example:
+#
+# sudo REDIS_PORT=1234 \
+# REDIS_CONFIG_FILE=/etc/redis/1234.conf \
+# REDIS_LOG_FILE=/var/log/redis_1234.log \
+# REDIS_DATA_DIR=/var/lib/redis/1234 \
+# REDIS_EXECUTABLE=`command -v redis-server` ./utils/install_server.sh
+#
+# This generates a redis config file and an /etc/init.d script, and installs them.
+#
+# /!\ This script should be run as root
+#
+# NOTE: This script will not work on Mac OSX.
+# It supports Debian and Ubuntu Linux.
+#
+################################################################################
+
+die () {
+ echo "ERROR: $1. Aborting!"
+ exit 1
+}
+
+
+#Absolute path to this script
+SCRIPT=$(readlink -f $0)
+#Absolute path this script is in
+SCRIPTPATH=$(dirname $SCRIPT)
+
+#Initial defaults
+_REDIS_PORT=6379
+_MANUAL_EXECUTION=false
+
+echo "Welcome to the redis service installer"
+echo "This script will help you easily set up a running redis server"
+echo
+
+#check for root user
+if [ "$(id -u)" -ne 0 ] ; then
+ echo "You must run this script as root. Sorry!"
+ exit 1
+fi
+
+#bail if this system is managed by systemd
+_pid_1_exe="$(readlink -f /proc/1/exe)"
+if [ "${_pid_1_exe##*/}" = systemd ]
+then
+	echo "This system seems to use systemd."
+ echo "Please take a look at the provided example service unit files in this directory, and adapt and install them. Sorry!"
+ exit 1
+fi
+unset _pid_1_exe
+
+if ! echo $REDIS_PORT | egrep -q '^[0-9]+$' ; then
+ _MANUAL_EXECUTION=true
+ #Read the redis port
+ read -p "Please select the redis port for this instance: [$_REDIS_PORT] " REDIS_PORT
+ if ! echo $REDIS_PORT | egrep -q '^[0-9]+$' ; then
+ echo "Selecting default: $_REDIS_PORT"
+ REDIS_PORT=$_REDIS_PORT
+ fi
+fi
+
+if [ -z "$REDIS_CONFIG_FILE" ] ; then
+ _MANUAL_EXECUTION=true
+ #read the redis config file
+ _REDIS_CONFIG_FILE="/etc/redis/$REDIS_PORT.conf"
+ read -p "Please select the redis config file name [$_REDIS_CONFIG_FILE] " REDIS_CONFIG_FILE
+ if [ -z "$REDIS_CONFIG_FILE" ] ; then
+ REDIS_CONFIG_FILE=$_REDIS_CONFIG_FILE
+ echo "Selected default - $REDIS_CONFIG_FILE"
+ fi
+fi
+
+if [ -z "$REDIS_LOG_FILE" ] ; then
+ _MANUAL_EXECUTION=true
+ #read the redis log file path
+ _REDIS_LOG_FILE="/var/log/redis_$REDIS_PORT.log"
+ read -p "Please select the redis log file name [$_REDIS_LOG_FILE] " REDIS_LOG_FILE
+ if [ -z "$REDIS_LOG_FILE" ] ; then
+ REDIS_LOG_FILE=$_REDIS_LOG_FILE
+ echo "Selected default - $REDIS_LOG_FILE"
+ fi
+fi
+
+if [ -z "$REDIS_DATA_DIR" ] ; then
+ _MANUAL_EXECUTION=true
+ #get the redis data directory
+ _REDIS_DATA_DIR="/var/lib/redis/$REDIS_PORT"
+ read -p "Please select the data directory for this instance [$_REDIS_DATA_DIR] " REDIS_DATA_DIR
+ if [ -z "$REDIS_DATA_DIR" ] ; then
+ REDIS_DATA_DIR=$_REDIS_DATA_DIR
+ echo "Selected default - $REDIS_DATA_DIR"
+ fi
+fi
+
+if [ ! -x "$REDIS_EXECUTABLE" ] ; then
+ _MANUAL_EXECUTION=true
+ #get the redis executable path
+ _REDIS_EXECUTABLE=`command -v redis-server`
+ read -p "Please select the redis executable path [$_REDIS_EXECUTABLE] " REDIS_EXECUTABLE
+ if [ ! -x "$REDIS_EXECUTABLE" ] ; then
+ REDIS_EXECUTABLE=$_REDIS_EXECUTABLE
+
+ if [ ! -x "$REDIS_EXECUTABLE" ] ; then
+ echo "Mmmmm... it seems like you don't have a redis executable. Did you run make install yet?"
+ exit 1
+ fi
+ fi
+fi
+
+#check the default for redis cli
+CLI_EXEC=`command -v redis-cli`
+if [ -z "$CLI_EXEC" ] ; then
+ CLI_EXEC=`dirname $REDIS_EXECUTABLE`"/redis-cli"
+fi
+
+echo "Selected config:"
+
+echo "Port : $REDIS_PORT"
+echo "Config file : $REDIS_CONFIG_FILE"
+echo "Log file : $REDIS_LOG_FILE"
+echo "Data dir : $REDIS_DATA_DIR"
+echo "Executable : $REDIS_EXECUTABLE"
+echo "Cli Executable : $CLI_EXEC"
+
+if $_MANUAL_EXECUTION == true ; then
+ read -p "Is this ok? Then press ENTER to go on or Ctrl-C to abort." _UNUSED_
+fi
+
+mkdir -p `dirname "$REDIS_CONFIG_FILE"` || die "Could not create redis config directory"
+mkdir -p `dirname "$REDIS_LOG_FILE"` || die "Could not create redis log dir"
+mkdir -p "$REDIS_DATA_DIR" || die "Could not create redis data directory"
+
+#render the templates
+TMP_FILE="/tmp/${REDIS_PORT}.conf"
+DEFAULT_CONFIG="${SCRIPTPATH}/../redis.conf"
+INIT_TPL_FILE="${SCRIPTPATH}/redis_init_script.tpl"
+INIT_SCRIPT_DEST="/etc/init.d/redis_${REDIS_PORT}"
+PIDFILE="/var/run/redis_${REDIS_PORT}.pid"
+
+if [ ! -f "$DEFAULT_CONFIG" ]; then
+ echo "Mmmmm... the default config is missing. Did you switch to the utils directory?"
+ exit 1
+fi
+
+#Generate config file from the default config file as template
+#changing only the stuff we're controlling from this script
+echo "## Generated by install_server.sh ##" > $TMP_FILE
+
+read -r SED_EXPR <<-EOF
+s#^port .\+#port ${REDIS_PORT}#; \
+s#^logfile .\+#logfile ${REDIS_LOG_FILE}#; \
+s#^dir .\+#dir ${REDIS_DATA_DIR}#; \
+s#^pidfile .\+#pidfile ${PIDFILE}#; \
+s#^daemonize no#daemonize yes#;
+EOF
+sed "$SED_EXPR" $DEFAULT_CONFIG >> $TMP_FILE
+
+#cat $TPL_FILE | while read line; do eval "echo \"$line\"" >> $TMP_FILE; done
+cp $TMP_FILE $REDIS_CONFIG_FILE || die "Could not write redis config file $REDIS_CONFIG_FILE"
+
+#Generate the init script from the template file
+rm -f $TMP_FILE
+
+#We hard code the configs here to avoid issues with templates containing
+#env vars. Kinda lame, but works!
+#
+#There is no need to check which init system we are on: the init info
+#lines below are comments and do not interfere with update-rc.d based
+#systems. Additionally, Ubuntu/Debian does not ship chkconfig by default,
+#but does issue a warning if the init info is missing.
+
+cat > ${TMP_FILE} <<EOT
+#!/bin/sh
+#Configurations injected by install_server below....
+
+EXEC=$REDIS_EXECUTABLE
+CLIEXEC=$CLI_EXEC
+PIDFILE=$PIDFILE
+CONF="$REDIS_CONFIG_FILE"
+REDISPORT="$REDIS_PORT"
+###############
+# SysV Init Information
+# chkconfig: - 58 74
+# description: redis_${REDIS_PORT} is the redis daemon.
+### BEGIN INIT INFO
+# Provides: redis_${REDIS_PORT}
+# Required-Start: \$network \$local_fs \$remote_fs
+# Required-Stop: \$network \$local_fs \$remote_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Should-Start: \$syslog \$named
+# Should-Stop: \$syslog \$named
+# Short-Description: start and stop redis_${REDIS_PORT}
+# Description: Redis daemon
+### END INIT INFO
+
+EOT
+cat ${INIT_TPL_FILE} >> ${TMP_FILE}
+
+#copy to /etc/init.d
+cp $TMP_FILE $INIT_SCRIPT_DEST && \
+ chmod +x $INIT_SCRIPT_DEST || die "Could not copy redis init script to $INIT_SCRIPT_DEST"
+echo "Copied $TMP_FILE => $INIT_SCRIPT_DEST"
+
+#Install the service
+echo "Installing service..."
+if command -v chkconfig >/dev/null 2>&1; then
+    # we're a chkconfig box, so let's add to chkconfig and put in runlevels 345
+ chkconfig --add redis_${REDIS_PORT} && echo "Successfully added to chkconfig!"
+ chkconfig --level 345 redis_${REDIS_PORT} on && echo "Successfully added to runlevels 345!"
+elif command -v update-rc.d >/dev/null 2>&1; then
+ #if we're not a chkconfig box assume we're able to use update-rc.d
+ update-rc.d redis_${REDIS_PORT} defaults && echo "Success!"
+else
+ echo "No supported init tool found."
+fi
+
+/etc/init.d/redis_$REDIS_PORT start || die "Failed starting service..."
+
+#tada
+echo "Installation successful!"
+exit 0
diff --git a/utils/lru/README b/utils/lru/README
new file mode 100644
index 0000000..f043b29
--- /dev/null
+++ b/utils/lru/README
@@ -0,0 +1,19 @@
+The test-lru.rb program can be used in order to check the behavior of the
+Redis approximated LRU algorithm against the theoretical output of a true
+LRU algorithm.
+
+In order to use the program you need to recompile Redis with the define
+REDIS_LRU_CLOCK_RESOLUTION set to 1, by editing the file server.h.
+This allows the program to run quickly, since the 1 ms resolution is
+enough for all the objects to get sufficiently different time stamps
+during the test.
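+
+For instance, a quick way to do that (a sketch; it assumes the define lives
+in src/server.h with a default value of 1000):
+
+    sed -i 's/REDIS_LRU_CLOCK_RESOLUTION 1000/REDIS_LRU_CLOCK_RESOLUTION 1/' src/server.h
+    make clean && make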
+
+The program is executed like this:
+
+ ruby test-lru.rb /tmp/lru.html
+
+You can optionally specify a number of times to run, so that the program
+will output averages of the different runs, by passing the --runs option.
+For instance, in order to run the test 10 times use:
+
+    ruby test-lru.rb /tmp/lru.html --runs 10
diff --git a/utils/lru/lfu-simulation.c b/utils/lru/lfu-simulation.c
new file mode 100644
index 0000000..60105e5
--- /dev/null
+++ b/utils/lru/lfu-simulation.c
@@ -0,0 +1,158 @@
+#include <stdio.h>
+#include <time.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+int decr_every = 1;
+int keyspace_size = 1000000;
+time_t switch_after = 30; /* Switch access pattern after N seconds. */
+
+struct entry {
+    /* Fields that the LFU Redis implementation will have (we have
+     * 24 bits of total space in the object->lru field). */
+ uint8_t counter; /* Logarithmic counter. */
+ uint16_t decrtime; /* (Reduced precision) time of last decrement. */
+
+ /* Fields only useful for visualization. */
+ uint64_t hits; /* Number of real accesses. */
+ time_t ctime; /* Key creation time. */
+};
+
+#define to_16bit_minutes(x) (((x)/60) & 65535)
+#define LFU_INIT_VAL 5
+
+/* Compute the difference in minutes between two 16 bit minute times
+ * obtained with to_16bit_minutes(). Since they can wrap around, if
+ * we detect the overflow we account for it as if the counter wrapped
+ * a single time. */
+uint16_t minutes_diff(uint16_t now, uint16_t prev) {
+ if (now >= prev) return now-prev;
+ return 65535-prev+now;
+}
+
+/* Increment a counter logarithmically: the greater its value, the less
+ * likely it is that the counter is actually incremented.
+ * The maximum value of the counter is saturated at 255. */
+uint8_t log_incr(uint8_t counter) {
+ if (counter == 255) return counter;
+ double r = (double)rand()/RAND_MAX;
+ double baseval = counter-LFU_INIT_VAL;
+ if (baseval < 0) baseval = 0;
+ double limit = 1.0/(baseval*10+1);
+ if (r < limit) counter++;
+ return counter;
+}
+
+/* Simulate an access to an entry. */
+void access_entry(struct entry *e) {
+ e->counter = log_incr(e->counter);
+ e->hits++;
+}
+
+/* Return the entry LFU value and as a side effect decrement the
+ * entry value if the decrement time was reached. */
+uint8_t scan_entry(struct entry *e) {
+ if (minutes_diff(to_16bit_minutes(time(NULL)),e->decrtime)
+ >= decr_every)
+ {
+ if (e->counter) {
+ if (e->counter > LFU_INIT_VAL*2) {
+ e->counter /= 2;
+ } else {
+ e->counter--;
+ }
+ }
+ e->decrtime = to_16bit_minutes(time(NULL));
+ }
+ return e->counter;
+}
+
+/* Print the entry info. */
+void show_entry(long pos, struct entry *e) {
+ char *tag = "normal ";
+
+ if (pos >= 10 && pos <= 14) tag = "new no access";
+ if (pos >= 15 && pos <= 19) tag = "new accessed ";
+ if (pos >= keyspace_size -5) tag= "old no access";
+
+ printf("%ld] <%s> frequency:%d decrtime:%d [%lu hits | age:%ld sec]\n",
+ pos, tag, e->counter, e->decrtime, (unsigned long)e->hits,
+ time(NULL) - e->ctime);
+}
+
+int main(void) {
+ time_t start = time(NULL);
+ time_t new_entry_time = start;
+ time_t display_time = start;
+ struct entry *entries = malloc(sizeof(*entries)*keyspace_size);
+ long j;
+
+ /* Initialize. */
+ for (j = 0; j < keyspace_size; j++) {
+ entries[j].counter = LFU_INIT_VAL;
+ entries[j].decrtime = to_16bit_minutes(start);
+ entries[j].hits = 0;
+ entries[j].ctime = time(NULL);
+ }
+
+ while(1) {
+ time_t now = time(NULL);
+ long idx;
+
+ /* Scan N random entries (simulates the eviction under maxmemory). */
+ for (j = 0; j < 3; j++) {
+ scan_entry(entries+(rand()%keyspace_size));
+ }
+
+        /* Access a random entry: use a power-law access pattern up to
+         * 'switch_after' seconds, then switch to a flat access pattern. */
+ if (now-start < switch_after) {
+ /* Power law. */
+ idx = 1;
+ while((rand() % 21) != 0 && idx < keyspace_size) idx *= 2;
+ if (idx > keyspace_size) idx = keyspace_size;
+ idx = rand() % idx;
+ } else {
+ /* Flat. */
+ idx = rand() % keyspace_size;
+ }
+
+        /* Never access entries between positions 10 and 14, so that
+         * we simulate what happens to new entries that are never
+         * accessed vs. new entries which are accessed in positions
+         * 15-19.
+         *
+         * Also never access the last 5 entries, so that we have keys
+         * which are never recreated (old), and never accessed. */
+ if ((idx < 10 || idx > 14) && (idx < keyspace_size-5))
+ access_entry(entries+idx);
+
+ /* Simulate the addition of new entries at positions between
+ * 10 and 19, a random one every 10 seconds. */
+ if (new_entry_time <= now) {
+ idx = 10+(rand()%10);
+ entries[idx].counter = LFU_INIT_VAL;
+ entries[idx].decrtime = to_16bit_minutes(time(NULL));
+ entries[idx].hits = 0;
+ entries[idx].ctime = time(NULL);
+ new_entry_time = now+10;
+ }
+
+ /* Show the first 20 entries and the last 20 entries. */
+ if (display_time != now) {
+ printf("=============================\n");
+ printf("Current minutes time: %d\n", (int)to_16bit_minutes(now));
+ printf("Access method: %s\n",
+ (now-start < switch_after) ? "power-law" : "flat");
+
+ for (j = 0; j < 20; j++)
+ show_entry(j,entries+j);
+
+ for (j = keyspace_size-20; j < keyspace_size; j++)
+ show_entry(j,entries+j);
+ display_time = now;
+ }
+ }
+ return 0;
+}
+
diff --git a/utils/lru/test-lru.rb b/utils/lru/test-lru.rb
new file mode 100644
index 0000000..d511e20
--- /dev/null
+++ b/utils/lru/test-lru.rb
@@ -0,0 +1,223 @@
+require 'rubygems'
+require 'redis'
+
+$runs = []; # Remember the error rate of each run for average purposes.
+$o = {}; # Options set parsing arguments
+
+def testit(filename)
+ r = Redis.new
+ r.config("SET","maxmemory","2000000")
+ if $o[:ttl]
+ r.config("SET","maxmemory-policy","volatile-ttl")
+ else
+ r.config("SET","maxmemory-policy","allkeys-lru")
+ end
+ r.config("SET","maxmemory-samples",5)
+ r.config("RESETSTAT")
+ r.flushall
+
+ html = ""
+ html << <<EOF
+ <html>
+ <body>
+ <style>
+ .box {
+ width:5px;
+ height:5px;
+ float:left;
+ margin: 1px;
+ }
+
+ .old {
+ border: 1px black solid;
+ }
+
+ .new {
+ border: 1px green solid;
+ }
+
+ .otherdb {
+ border: 1px red solid;
+ }
+
+ .ex {
+ background-color: #666;
+ }
+ </style>
+ <pre>
+EOF
+
+ # Fill the DB up to the first eviction.
+ oldsize = r.dbsize
+ id = 0
+ while true
+ id += 1
+ begin
+ r.set(id,"foo")
+ rescue
+ break
+ end
+ newsize = r.dbsize
+ break if newsize == oldsize # A key was evicted? Stop.
+ oldsize = newsize
+ end
+
+ inserted = r.dbsize
+ first_set_max_id = id
+ html << "#{r.dbsize} keys inserted.\n"
+
+    # Access keys sequentially, so that in theory the first part will be
+    # evicted and the latter part will not, according to perfect LRU.
+
+ if $o[:ttl]
+ STDERR.puts "Set increasing expire value"
+ (1..first_set_max_id).each{|id|
+ r.expire(id,1000+id)
+ STDERR.print(".") if (id % 150) == 0
+ }
+ else
+ STDERR.puts "Access keys sequentially"
+ (1..first_set_max_id).each{|id|
+ r.get(id)
+ sleep 0.001
+ STDERR.print(".") if (id % 150) == 0
+ }
+ end
+ STDERR.puts
+
+    # Insert 50% more keys. We expect that the new keys will rarely be
+    # evicted since their last access time is recent compared to the others.
+    #
+    # Note that we insert the first 100 keys of the new set into DB1 instead
+    # of DB0, so that we can test how cross-DB eviction works.
+ half = inserted/2
+ html << "Insert enough keys to evict half the keys we inserted.\n"
+ add = 0
+
+ otherdb_start_idx = id+1
+ otherdb_end_idx = id+100
+ while true
+ add += 1
+ id += 1
+ if id >= otherdb_start_idx && id <= otherdb_end_idx
+ r.select(1)
+ r.set(id,"foo")
+ r.select(0)
+ else
+ r.set(id,"foo")
+ end
+ break if r.info['evicted_keys'].to_i >= half
+ end
+
+ html << "#{add} additional keys added.\n"
+ html << "#{r.dbsize} keys in DB.\n"
+
+ # Check if evicted keys respect LRU
+ # We consider errors from 1 to N progressively more serious as they violate
+ # more the access pattern.
+
+ errors = 0
+ e = 1
+ error_per_key = 100000.0/first_set_max_id
+ half_set_size = first_set_max_id/2
+ maxerr = 0
+ (1..(first_set_max_id/2)).each{|id|
+ if id >= otherdb_start_idx && id <= otherdb_end_idx
+ r.select(1)
+ exists = r.exists(id)
+ r.select(0)
+ else
+ exists = r.exists(id)
+ end
+ if id < first_set_max_id/2
+ thiserr = error_per_key * ((half_set_size-id).to_f/half_set_size)
+ maxerr += thiserr
+ errors += thiserr if exists
+ elsif id >= first_set_max_id/2
+ thiserr = error_per_key * ((id-half_set_size).to_f/half_set_size)
+ maxerr += thiserr
+ errors += thiserr if !exists
+ end
+ }
+ errors = errors*100/maxerr
+
+ STDERR.puts "Test finished with #{errors}% error! Generating HTML on stdout."
+
+ html << "#{errors}% error!\n"
+ html << "</pre>"
+ $runs << errors
+
+ # Generate the graphical representation
+ (1..id).each{|id|
+ # Mark first set and added items in a different way.
+ c = "box"
+ if id >= otherdb_start_idx && id <= otherdb_end_idx
+ c << " otherdb"
+ elsif id <= first_set_max_id
+ c << " old"
+ else
+ c << " new"
+ end
+
+ # Add class if exists
+ if id >= otherdb_start_idx && id <= otherdb_end_idx
+ r.select(1)
+ exists = r.exists(id)
+ r.select(0)
+ else
+ exists = r.exists(id)
+ end
+
+ c << " ex" if exists
+ html << "<div title=\"#{id}\" class=\"#{c}\"></div>"
+ }
+
+ # Close HTML page
+
+ html << <<EOF
+ </body>
+ </html>
+EOF
+
+ f = File.open(filename,"w")
+ f.write(html)
+ f.close
+end
+
+def print_avg
+ avg = ($runs.reduce {|a,b| a+b}) / $runs.length
+ puts "#{$runs.length} runs, AVG is #{avg}"
+end
+
+if ARGV.length < 1
+ STDERR.puts "Usage: ruby test-lru.rb <html-output-filename> [--runs <count>] [--ttl]"
+ STDERR.puts "Options:"
+ STDERR.puts " --runs <count> Execute the test <count> times."
+ STDERR.puts " --ttl Set keys with increasing TTL values"
+ STDERR.puts " (starting from 1000 seconds) in order to"
+ STDERR.puts " test the volatile-lru policy."
+ exit 1
+end
+
+filename = ARGV[0]
+$o[:numruns] = 1
+
+# Options parsing
+i = 1
+while i < ARGV.length
+ if ARGV[i] == '--runs'
+ $o[:numruns] = ARGV[i+1].to_i
+ i+= 1
+ elsif ARGV[i] == '--ttl'
+ $o[:ttl] = true
+ else
+ STDERR.puts "Unknown option #{ARGV[i]}"
+ exit 1
+ end
+ i+= 1
+end
+
+$o[:numruns].times {
+ testit(filename)
+ print_avg if $o[:numruns] != 1
+}
diff --git a/utils/redis-copy.rb b/utils/redis-copy.rb
new file mode 100644
index 0000000..7c5c52d
--- /dev/null
+++ b/utils/redis-copy.rb
@@ -0,0 +1,35 @@
+# redis-copy.rb - Copyright (C) 2009-2010 Salvatore Sanfilippo
+# BSD license, See the COPYING file for more information.
+#
+# Copy the whole dataset from one Redis instance to another one
+#
+# WARNING: this utility is deprecated and serves as a legacy adapter
+# for the more robust redis-copy gem.
+
+require 'shellwords'
+
+def redisCopy(opts={})
+ src = "#{opts[:srchost]}:#{opts[:srcport]}"
+ dst = "#{opts[:dsthost]}:#{opts[:dstport]}"
+ `redis-copy #{src.shellescape} #{dst.shellescape}`
+rescue Errno::ENOENT
+ $stderr.puts 'This utility requires the redis-copy executable',
+ 'from the redis-copy gem on https://rubygems.org',
+ 'To install it, run `gem install redis-copy`.'
+ exit 1
+end
+
+$stderr.puts "This utility is deprecated. Use the redis-copy gem instead."
+if ARGV.length != 4
+ puts "Usage: redis-copy.rb <srchost> <srcport> <dsthost> <dstport>"
+ exit 1
+end
+puts "WARNING: it's up to you to FLUSHDB the destination host before to continue, press any key when ready."
+STDIN.gets
+srchost = ARGV[0]
+srcport = ARGV[1]
+dsthost = ARGV[2]
+dstport = ARGV[3]
+puts "Copying #{srchost}:#{srcport} into #{dsthost}:#{dstport}"
+redisCopy(:srchost => srchost, :srcport => srcport.to_i,
+ :dsthost => dsthost, :dstport => dstport.to_i)
diff --git a/utils/redis-sha1.rb b/utils/redis-sha1.rb
new file mode 100644
index 0000000..6a8b4f3
--- /dev/null
+++ b/utils/redis-sha1.rb
@@ -0,0 +1,52 @@
+# redis-sha1.rb - Copyright (C) 2009 Salvatore Sanfilippo
+# BSD license, See the COPYING file for more information.
+#
+# Performs the SHA1 sum of the whole dataset.
+# This is useful to spot bugs in persistence related code and to make sure
+# Slaves and Masters are in SYNC.
+#
+# If you hack this code make sure to sort keys and set elements, as these
+# are unordered collections. Otherwise the sum may differ for identical datasets.
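+#
+# Usage: ruby redis-sha1.rb [host] [port] [db]
+# (defaults: 127.0.0.1 6379 0)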
+
+require 'rubygems'
+require 'redis'
+require 'digest/sha1'
+
+def redisSha1(opts={})
+ sha1=""
+ r = Redis.new(opts)
+ r.keys('*').sort.each{|k|
+ vtype = r.type?(k)
+ if vtype == "string"
+ len = 1
+ sha1 = Digest::SHA1.hexdigest(sha1+k)
+ sha1 = Digest::SHA1.hexdigest(sha1+r.get(k))
+ elsif vtype == "list"
+ len = r.llen(k)
+ if len != 0
+ sha1 = Digest::SHA1.hexdigest(sha1+k)
+ sha1 = Digest::SHA1.hexdigest(sha1+r.list_range(k,0,-1).join("\x01"))
+ end
+ elsif vtype == "set"
+ len = r.scard(k)
+ if len != 0
+ sha1 = Digest::SHA1.hexdigest(sha1+k)
+ sha1 = Digest::SHA1.hexdigest(sha1+r.set_members(k).to_a.sort.join("\x02"))
+ end
+ elsif vtype == "zset"
+ len = r.zcard(k)
+ if len != 0
+ sha1 = Digest::SHA1.hexdigest(sha1+k)
+ sha1 = Digest::SHA1.hexdigest(sha1+r.zrange(k,0,-1).join("\x01"))
+ end
+ end
+ # puts "#{k} => #{sha1}" if len != 0
+ }
+ sha1
+end
+
+host = ARGV[0] || "127.0.0.1"
+port = ARGV[1] || "6379"
+db = ARGV[2] || "0"
+puts "Performing SHA1 of Redis server #{host} #{port} DB: #{db}"
+p "Dataset SHA1: #{redisSha1(:host => host, :port => port.to_i, :db => db)}"
diff --git a/utils/redis_init_script b/utils/redis_init_script
new file mode 100755
index 0000000..006db87
--- /dev/null
+++ b/utils/redis_init_script
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+# Simple Redis init.d script conceived to work on Linux systems
+# as it makes use of the /proc filesystem.
+
+### BEGIN INIT INFO
+# Provides: redis_6379
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Redis data structure server
+# Description: Redis data structure server. See https://redis.io
+### END INIT INFO
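+#
+# To install (a sketch): copy this file to /etc/init.d/redis_6379, adjust
+# the variables below as needed, then register it with your init system,
+# e.g. "update-rc.d redis_6379 defaults" on Debian/Ubuntu or
+# "chkconfig --add redis_6379" on RHEL/CentOS.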
+
+REDISPORT=6379
+EXEC=/usr/local/bin/redis-server
+CLIEXEC=/usr/local/bin/redis-cli
+
+PIDFILE=/var/run/redis_${REDISPORT}.pid
+CONF="/etc/redis/${REDISPORT}.conf"
+
+case "$1" in
+ start)
+ if [ -f $PIDFILE ]
+ then
+ echo "$PIDFILE exists, process is already running or crashed"
+ else
+ echo "Starting Redis server..."
+ $EXEC $CONF
+ fi
+ ;;
+ stop)
+ if [ ! -f $PIDFILE ]
+ then
+ echo "$PIDFILE does not exist, process is not running"
+ else
+ PID=$(cat $PIDFILE)
+ echo "Stopping ..."
+ $CLIEXEC -p $REDISPORT shutdown
+ while [ -x /proc/${PID} ]
+ do
+ echo "Waiting for Redis to shutdown ..."
+ sleep 1
+ done
+ echo "Redis stopped"
+ fi
+ ;;
+ *)
+ echo "Please use start or stop as first argument"
+ ;;
+esac
diff --git a/utils/redis_init_script.tpl b/utils/redis_init_script.tpl
new file mode 100755
index 0000000..2e5b613
--- /dev/null
+++ b/utils/redis_init_script.tpl
@@ -0,0 +1,44 @@
+
+case "$1" in
+ start)
+ if [ -f $PIDFILE ]
+ then
+ echo "$PIDFILE exists, process is already running or crashed"
+ else
+ echo "Starting Redis server..."
+ $EXEC $CONF
+ fi
+ ;;
+ stop)
+ if [ ! -f $PIDFILE ]
+ then
+ echo "$PIDFILE does not exist, process is not running"
+ else
+ PID=$(cat $PIDFILE)
+ echo "Stopping ..."
+ $CLIEXEC -p $REDISPORT shutdown
+ while [ -x /proc/${PID} ]
+ do
+ echo "Waiting for Redis to shutdown ..."
+ sleep 1
+ done
+ echo "Redis stopped"
+ fi
+ ;;
+    status)
+        if [ ! -f $PIDFILE ]
+        then
+            echo 'Redis is not running'
+        else
+            PID=$(cat $PIDFILE)
+            if [ ! -x /proc/${PID} ]
+            then
+                echo 'Redis is not running'
+            else
+                echo "Redis is running ($PID)"
+            fi
+        fi
+        ;;
+ restart)
+ $0 stop
+ $0 start
+ ;;
+ *)
+ echo "Please use start, stop, restart or status as first argument"
+ ;;
+esac
diff --git a/utils/releasetools/01_create_tarball.sh b/utils/releasetools/01_create_tarball.sh
new file mode 100755
index 0000000..366a61e
--- /dev/null
+++ b/utils/releasetools/01_create_tarball.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+if [ $# != "1" ]
+then
+ echo "Usage: ./utils/releasetools/01_create_tarball.sh <version_tag>"
+ exit 1
+fi
+
+TAG=$1
+TARNAME="redis-${TAG}.tar"
+echo "Generating /tmp/${TARNAME}"
+git archive $TAG --prefix redis-${TAG}/ > /tmp/$TARNAME || exit 1
+echo "Gizipping the archive"
+rm -f /tmp/$TARNAME.gz
+gzip -9 /tmp/$TARNAME
diff --git a/utils/releasetools/02_upload_tarball.sh b/utils/releasetools/02_upload_tarball.sh
new file mode 100755
index 0000000..ef1e777
--- /dev/null
+++ b/utils/releasetools/02_upload_tarball.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+if [ $# != "1" ]
+then
+ echo "Usage: ./utils/releasetools/02_upload_tarball.sh <version_tag>"
+ exit 1
+fi
+
+echo "Uploading..."
+scp /tmp/redis-${1}.tar.gz ubuntu@host.redis.io:/var/www/download/releases/
+echo "Updating web site... "
+echo "Please check the github action tests for the release."
+echo "Press any key if it is a stable release, or Ctrl+C to abort"
+read x
+ssh ubuntu@host.redis.io "cd /var/www/download;
+ rm -rf redis-${1}.tar.gz;
+ wget http://download.redis.io/releases/redis-${1}.tar.gz;
+ tar xvzf redis-${1}.tar.gz;
+ rm -rf redis-stable;
+ mv redis-${1} redis-stable;
+ tar cvzf redis-stable.tar.gz redis-stable;
+ rm -rf redis-${1}.tar.gz;
+ shasum -a 256 redis-stable.tar.gz > redis-stable.tar.gz.SHA256SUM;
+ "
diff --git a/utils/releasetools/03_test_release.sh b/utils/releasetools/03_test_release.sh
new file mode 100755
index 0000000..493d0b7
--- /dev/null
+++ b/utils/releasetools/03_test_release.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+set -e
+if [ $# != "1" ]
+then
+ echo "Usage: ./utils/releasetools/03_test_release.sh <version_tag>"
+ exit 1
+fi
+
+TAG=$1
+TARNAME="redis-${TAG}.tar.gz"
+DOWNLOADURL="http://download.redis.io/releases/${TARNAME}"
+
+echo "Doing sanity test on the actual tarball"
+
+cd /tmp
+rm -rf test_release_tmp_dir
+mkdir test_release_tmp_dir
+cd test_release_tmp_dir
+rm -f $TARNAME
+rm -rf redis-${TAG}
+wget $DOWNLOADURL
+tar xvzf $TARNAME
+cd redis-${TAG}
+make
+./runtest
+./runtest-sentinel
+./runtest-cluster
+./runtest-moduleapi
diff --git a/utils/releasetools/04_release_hash.sh b/utils/releasetools/04_release_hash.sh
new file mode 100755
index 0000000..d932928
--- /dev/null
+++ b/utils/releasetools/04_release_hash.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+if [ $# != "1" ]
+then
+ echo "Usage: ./utils/releasetools/04_release_hash.sh <version_tag>"
+ exit 1
+fi
+
+SHA=$(curl -s http://download.redis.io/releases/redis-${1}.tar.gz | shasum -a 256 | cut -f 1 -d' ')
+ENTRY="hash redis-${1}.tar.gz sha256 $SHA http://download.redis.io/releases/redis-${1}.tar.gz"
+echo $ENTRY >> ../redis-hashes/README
+echo "Press any key to commit, Ctrl-C to abort)."
+read yes
+(cd ../redis-hashes; git commit -a -m "${1} hash."; git push)
diff --git a/utils/releasetools/changelog.tcl b/utils/releasetools/changelog.tcl
new file mode 100755
index 0000000..2288794
--- /dev/null
+++ b/utils/releasetools/changelog.tcl
@@ -0,0 +1,35 @@
+#!/usr/bin/env tclsh
+
+if {[llength $::argv] != 2 && [llength $::argv] != 3} {
+ puts "Usage: $::argv0 <branch> <version> \[<num-commits>\]"
+ exit 1
+}
+
+set branch [lindex $::argv 0]
+set ver [lindex $::argv 1]
+if {[llength $::argv] == 3} {
+    set count [lindex $::argv 2]
+} else {
+ set count 100
+}
+
+set template {
+================================================================================
+Redis %ver% Released %date%
+================================================================================
+
+Upgrade urgency <URGENCY>: <DESCRIPTION>
+}
+
+set template [string trim $template]
+append template "\n\n"
+set date [clock format [clock seconds]]
+set template [string map [list %ver% $ver %date% $date] $template]
+
+append template [exec git log $branch~$count..$branch "--format=format:%an in commit %h:%n %s" --shortstat]
+
+#Older, more verbose version.
+#
+#append template [exec git log $branch~30..$branch "--format=format:+-------------------------------------------------------------------------------%n| %s%n| By %an, %ai%n+--------------------------------------------------------------------------------%nhttps://github.com/redis/redis/commit/%H%n%n%b" --stat]
+
+puts $template
diff --git a/utils/reply_schema_linter.js b/utils/reply_schema_linter.js
new file mode 100644
index 0000000..e2358d4
--- /dev/null
+++ b/utils/reply_schema_linter.js
@@ -0,0 +1,31 @@
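+// Lint the reply_schema fields in src/commands/*.json by compiling each one
+// with Ajv (this assumes the "ajv" npm package is installed). Because the
+// schema directory path below is relative, run the linter from the
+// repository root, e.g.: node utils/reply_schema_linter.js
+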
+function validate_schema(command_schema) {
+ var error_status = false
+ const Ajv = require("ajv/dist/2019")
+ const ajv = new Ajv({strict: true, strictTuples: false})
+ let json = require('../src/commands/'+ command_schema);
+ for (var item in json) {
+ const schema = json[item].reply_schema
+ if (schema == undefined)
+ continue;
+ try {
+ ajv.compile(schema)
+ } catch (error) {
+ console.error(command_schema + " : " + error.toString())
+ error_status = true
+ }
+ }
+ return error_status
+}
+
+const schema_directory_path = './src/commands'
+const path = require('path')
+var fs = require('fs');
+var files = fs.readdirSync(schema_directory_path);
+const jsonFiles = files.filter(el => path.extname(el) === '.json')
+var error_status = false
+jsonFiles.forEach(function(file){
+ if (validate_schema(file))
+ error_status = true
+})
+if (error_status)
+ process.exit(1)
diff --git a/utils/req-res-log-validator.py b/utils/req-res-log-validator.py
new file mode 100755
index 0000000..46c1100
--- /dev/null
+++ b/utils/req-res-log-validator.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python3
+import os
+import glob
+import json
+import sys
+
+import jsonschema
+import subprocess
+import redis
+import time
+import argparse
+import multiprocessing
+import collections
+import io
+import traceback
+from datetime import timedelta
+from functools import partial
+try:
+ from jsonschema import Draft201909Validator as schema_validator
+except ImportError:
+ from jsonschema import Draft7Validator as schema_validator
+
+"""
+The purpose of this file is to validate the reply_schema values of COMMAND DOCS.
+Basically, this is what it does:
+1. Goes over the req-res files generated by the redis-server instances spawned by the testsuite (see logreqres.c)
+2. For each request-response pair, it validates the response against the request's reply_schema (obtained from COMMAND DOCS)
+
+This script spins up a redis-server and a redis-cli in order to obtain COMMAND DOCS.
+
+In order to use this file you must run the redis testsuite with the following flags:
+./runtest --dont-clean --force-resp3 --log-req-res
+
+And then:
+./utils/req-res-log-validator.py
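+
+(The Python dependencies are pinned in utils/req-res-validator/requirements.txt
+and can be installed with: pip install -r utils/req-res-validator/requirements.txt)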
+
+The script will fail only if:
+1. One or more of the replies doesn't comply with its schema.
+2. One or more of the commands in COMMAND DOCS doesn't have the reply_schema field (with --fail-missing-reply-schemas)
+3. The testsuite didn't execute all of the commands (with --fail-commands-not-all-hit)
+
+Future validations:
+1. Fail the script if one or more of the branches of the reply schema (e.g. oneOf, anyOf) was not hit.
+"""
+
+IGNORED_COMMANDS = {
+ # Commands that don't work in a req-res manner (see logreqres.c)
+ "debug", # because of DEBUG SEGFAULT
+ "sync",
+ "psync",
+ "monitor",
+ "subscribe",
+ "unsubscribe",
+ "ssubscribe",
+ "sunsubscribe",
+ "psubscribe",
+ "punsubscribe",
+ # Commands to which we decided not write a reply schema
+ "pfdebug",
+ "lolwut",
+}
+
+class Request(object):
+ """
+ This class represents a Redis request (AKA command, argv)
+ """
+ def __init__(self, f, docs, line_counter):
+ """
+ Read lines from `f` (generated by logreqres.c) and populates the argv array
+ """
+ self.command = None
+ self.schema = None
+ self.argv = []
+
+ while True:
+ line = f.readline()
+ line_counter[0] += 1
+ if not line:
+ break
+ length = int(line)
+ arg = str(f.read(length))
+ f.read(2) # read \r\n
+ line_counter[0] += 1
+ if arg == "__argv_end__":
+ break
+ self.argv.append(arg)
+
+ if not self.argv:
+ return
+
+ self.command = self.argv[0].lower()
+ doc = docs.get(self.command, {})
+ if not doc and len(self.argv) > 1:
+ self.command = f"{self.argv[0].lower()}|{self.argv[1].lower()}"
+ doc = docs.get(self.command, {})
+
+ if not doc:
+ self.command = None
+ return
+
+ self.schema = doc.get("reply_schema")
+
+ def __str__(self):
+ return json.dumps(self.argv)
+
+
+class Response(object):
+ """
+ This class represents a Redis response in RESP3
+ """
+ def __init__(self, f, line_counter):
+ """
+ Read lines from `f` (generated by logreqres.c) and build the JSON representing the response in RESP3
+ """
+ self.error = False
+ self.queued = False
+ self.json = None
+
+ line = f.readline()[:-2]
+ line_counter[0] += 1
+ if line[0] == '+':
+ self.json = line[1:]
+ if self.json == "QUEUED":
+ self.queued = True
+ elif line[0] == '-':
+ self.json = line[1:]
+ self.error = True
+ elif line[0] == '$':
+ self.json = str(f.read(int(line[1:])))
+ f.read(2) # read \r\n
+ line_counter[0] += 1
+ elif line[0] == ':':
+ self.json = int(line[1:])
+ elif line[0] == ',':
+ self.json = float(line[1:])
+ elif line[0] == '_':
+ self.json = None
+ elif line[0] == '#':
+ self.json = line[1] == 't'
+ elif line[0] == '!':
+ self.json = str(f.read(int(line[1:])))
+ f.read(2) # read \r\n
+ line_counter[0] += 1
+ self.error = True
+ elif line[0] == '=':
+ self.json = str(f.read(int(line[1:])))[4:] # skip "txt:" or "mkd:"
+ f.read(2) # read \r\n
+ line_counter[0] += 1 + self.json.count("\r\n")
+ elif line[0] == '(':
+ self.json = line[1:] # big-number is actually a string
+ elif line[0] in ['*', '~', '>']: # unfortunately JSON doesn't tell the difference between a list and a set
+ self.json = []
+ count = int(line[1:])
+ for i in range(count):
+ ele = Response(f, line_counter)
+ self.json.append(ele.json)
+ elif line[0] in ['%', '|']:
+ self.json = {}
+ count = int(line[1:])
+ for i in range(count):
+ field = Response(f, line_counter)
+ # Redis allows fields to be non-strings but JSON doesn't.
+ # Luckily, for any kind of response we can validate, the fields are
+ # always strings (example: XINFO STREAM)
+ # The reason we can't always convert to string is because of DEBUG PROTOCOL MAP
+ # which anyway doesn't have a schema
+ if isinstance(field.json, str):
+ field = field.json
+ value = Response(f, line_counter)
+ self.json[field] = value.json
+ if line[0] == '|':
+ # We don't care about the attributes, read the real response
+ real_res = Response(f, line_counter)
+ self.__dict__.update(real_res.__dict__)
+
+
+ def __str__(self):
+ return json.dumps(self.json)
+
+
+def process_file(docs, path):
+ """
+ This function processes a single file generated by logreqres.c
+ """
+ line_counter = [0] # A list with one integer: to force python to pass it by reference
+ command_counter = dict()
+
+ print(f"Processing {path} ...")
+
+ # Convert file to StringIO in order to minimize IO operations
+ with open(path, "r", newline="\r\n", encoding="latin-1") as f:
+ content = f.read()
+
+ with io.StringIO(content) as fakefile:
+ while True:
+ try:
+ req = Request(fakefile, docs, line_counter)
+ if not req.argv:
+ # EOF
+ break
+ res = Response(fakefile, line_counter)
+ except json.decoder.JSONDecodeError as err:
+ print(f"JSON decoder error while processing {path}:{line_counter[0]}: {err}")
+ print(traceback.format_exc())
+ raise
+ except Exception as err:
+ print(f"General error while processing {path}:{line_counter[0]}: {err}")
+ print(traceback.format_exc())
+ raise
+
+ if not req.command:
+ # Unknown command
+ continue
+
+ command_counter[req.command] = command_counter.get(req.command, 0) + 1
+
+ if res.error or res.queued:
+ continue
+
+ if req.command in IGNORED_COMMANDS:
+ continue
+
+ try:
+ jsonschema.validate(instance=res.json, schema=req.schema, cls=schema_validator)
+ except (jsonschema.ValidationError, jsonschema.exceptions.SchemaError) as err:
+ print(f"JSON schema validation error on {path}: {err}")
+ print(f"argv: {req.argv}")
+ try:
+ print(f"Response: {res}")
+ except UnicodeDecodeError as err:
+ print("Response: (unprintable)")
+ print(f"Schema: {json.dumps(req.schema, indent=2)}")
+ print(traceback.format_exc())
+ raise
+
+ return command_counter
+
+
+def fetch_schemas(cli, port, args, docs):
+ redis_proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+
+ while True:
+ try:
+ print('Connecting to Redis...')
+ r = redis.Redis(port=port)
+ r.ping()
+ break
+ except Exception as e:
+ time.sleep(0.1)
+
+ print('Connected')
+
+ cli_proc = subprocess.Popen([cli, '-p', str(port), '--json', 'command', 'docs'], stdout=subprocess.PIPE)
+ stdout, stderr = cli_proc.communicate()
+ docs_response = json.loads(stdout)
+
+ for name, doc in docs_response.items():
+ if "subcommands" in doc:
+ for subname, subdoc in doc["subcommands"].items():
+ docs[subname] = subdoc
+ else:
+ docs[name] = doc
+
+ redis_proc.terminate()
+ redis_proc.wait()
+
+
+if __name__ == '__main__':
+ # Figure out where the sources are
+ srcdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../src")
+ testdir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../tests")
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--server', type=str, default='%s/redis-server' % srcdir)
+ parser.add_argument('--port', type=int, default=6534)
+ parser.add_argument('--cli', type=str, default='%s/redis-cli' % srcdir)
+ parser.add_argument('--module', type=str, action='append', default=[])
+ parser.add_argument('--verbose', action='store_true')
+ parser.add_argument('--fail-commands-not-all-hit', action='store_true')
+ parser.add_argument('--fail-missing-reply-schemas', action='store_true')
+ args = parser.parse_args()
+
+ docs = dict()
+
+ # Fetch schemas from a Redis instance
+ print('Starting Redis server')
+ redis_args = [args.server, '--port', str(args.port)]
+ for module in args.module:
+ redis_args += ['--loadmodule', 'tests/modules/%s.so' % module]
+
+ fetch_schemas(args.cli, args.port, redis_args, docs)
+
+ # Fetch schemas from a sentinel
+ print('Starting Redis sentinel')
+
+ # Sentinel needs a config file to start
+ config_file = "tmpsentinel.conf"
+ open(config_file, 'a').close()
+
+ sentinel_args = [args.server, config_file, '--port', str(args.port), "--sentinel"]
+ fetch_schemas(args.cli, args.port, sentinel_args, docs)
+ os.unlink(config_file)
+
+ missing_schema = [k for k, v in docs.items()
+ if "reply_schema" not in v and k not in IGNORED_COMMANDS]
+ if missing_schema:
+ print("WARNING! The following commands are missing a reply_schema:")
+ for k in sorted(missing_schema):
+ print(f" {k}")
+ if args.fail_missing_reply_schemas:
+ print("ERROR! at least one command does not have a reply_schema")
+ sys.exit(1)
+
+ start = time.time()
+
+    # Obtain all the files to process
+ paths = []
+ for path in glob.glob('%s/tmp/*/*.reqres' % testdir):
+ paths.append(path)
+
+ for path in glob.glob('%s/cluster/tmp/*/*.reqres' % testdir):
+ paths.append(path)
+
+ for path in glob.glob('%s/sentinel/tmp/*/*.reqres' % testdir):
+ paths.append(path)
+
+ counter = collections.Counter()
+ # Spin several processes to handle the files in parallel
+ with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
+ func = partial(process_file, docs)
+ # pool.map blocks until all the files have been processed
+ for result in pool.map(func, paths):
+ counter.update(result)
+ command_counter = dict(counter)
+
+ elapsed = time.time() - start
+ print(f"Done. ({timedelta(seconds=elapsed)})")
+ print("Hits per command:")
+ for k, v in sorted(command_counter.items()):
+ print(f" {k}: {v}")
+ not_hit = set(set(docs.keys()) - set(command_counter.keys()) - set(IGNORED_COMMANDS))
+ if not_hit:
+ if args.verbose:
+ print("WARNING! The following commands were not hit at all:")
+ for k in sorted(not_hit):
+ print(f" {k}")
+ if args.fail_commands_not_all_hit:
+ print("ERROR! at least one command was not hit by the tests")
+ sys.exit(1)
+
diff --git a/utils/req-res-validator/requirements.txt b/utils/req-res-validator/requirements.txt
new file mode 100644
index 0000000..0e3024b
--- /dev/null
+++ b/utils/req-res-validator/requirements.txt
@@ -0,0 +1,2 @@
+jsonschema==4.17.3
+redis==4.5.1 \ No newline at end of file
diff --git a/utils/speed-regression.tcl b/utils/speed-regression.tcl
new file mode 100755
index 0000000..bf35c7d
--- /dev/null
+++ b/utils/speed-regression.tcl
@@ -0,0 +1,130 @@
+#!/usr/bin/env tclsh8.5
+# Copyright (C) 2011 Salvatore Sanfilippo
+# Released under the BSD license like Redis itself
+
+source ../tests/support/redis.tcl
+set ::port 12123
+set ::tests {PING,SET,GET,INCR,LPUSH,LPOP,SADD,SPOP,LRANGE_100,LRANGE_600,MSET}
+set ::datasize 16
+set ::requests 100000
+
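+# Usage (a sketch; the defaults are set above):
+#   cd utils && ./speed-regression.tcl --tests PING,SET,GET --datasize 32 --requests 50000
+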
+proc run-tests branches {
+ set runs {}
+ set branch_id 0
+ foreach b $branches {
+ cd ../src
+ puts "Benchmarking $b"
+ exec -ignorestderr git checkout $b 2> /dev/null
+ exec -ignorestderr make clean 2> /dev/null
+ puts " compiling..."
+ exec -ignorestderr make 2> /dev/null
+
+ if {$branch_id == 0} {
+ puts " copy redis-benchmark from unstable to /tmp..."
+ exec -ignorestderr cp ./redis-benchmark /tmp
+ incr branch_id
+ continue
+ }
+
+ # Start the Redis server
+ puts " starting the server... [exec ./redis-server -v]"
+ set pids [exec echo "port $::port\nloglevel warning\n" | ./redis-server - > /dev/null 2> /dev/null &]
+ puts " pids: $pids"
+ after 1000
+ puts " running the benchmark"
+
+ set r [redis 127.0.0.1 $::port]
+ set i [$r info]
+ puts " redis INFO shows version: [lindex [split $i] 0]"
+ $r close
+
+ set output [exec /tmp/redis-benchmark -n $::requests -t $::tests -d $::datasize --csv -p $::port]
+ lappend runs $b $output
+ puts " killing server..."
+ catch {exec kill -9 [lindex $pids 0]}
+ catch {exec kill -9 [lindex $pids 1]}
+ incr branch_id
+ }
+ return $runs
+}
+
+proc get-result-with-name {output name} {
+ foreach line [split $output "\n"] {
+ lassign [split $line ","] key value
+ set key [string tolower [string range $key 1 end-1]]
+ set value [string range $value 1 end-1]
+ if {$key eq [string tolower $name]} {
+ return $value
+ }
+ }
+ return "n/a"
+}
+
+proc get-test-names output {
+ set names {}
+ foreach line [split $output "\n"] {
+ lassign [split $line ","] key value
+ set key [string tolower [string range $key 1 end-1]]
+ lappend names $key
+ }
+ return $names
+}
+
+proc combine-results {results} {
+ set tests [get-test-names [lindex $results 1]]
+ foreach test $tests {
+ puts $test
+ foreach {branch output} $results {
+ puts [format "%-20s %s" \
+ $branch [get-result-with-name $output $test]]
+ }
+ puts {}
+ }
+}
+
+proc main {} {
+ # Note: the first branch is only used in order to get the redis-benchmark
+ # executable. Tests are performed starting from the second branch.
+ set branches {
+ slowset 2.2.0 2.4.0 unstable slowset
+ }
+ set results [run-tests $branches]
+ puts "\n"
+ puts "# Test results: datasize=$::datasize requests=$::requests"
+ puts [combine-results $results]
+}
+
+# Force the user to run the script from the 'utils' directory.
+if {![file exists speed-regression.tcl]} {
+ puts "Please make sure to run speed-regression.tcl while inside /utils."
+ puts "Example: cd utils; ./speed-regression.tcl"
+ exit 1
+}
+
+# Make sure there is not already a server running on port 12123
+set is_not_running [catch {set r [redis 127.0.0.1 $::port]}]
+if {!$is_not_running} {
+ puts "Sorry, you have a running server on port $::port"
+ exit 1
+}
+
+# parse arguments
+for {set j 0} {$j < [llength $argv]} {incr j} {
+ set opt [lindex $argv $j]
+ set arg [lindex $argv [expr $j+1]]
+ if {$opt eq {--tests}} {
+ set ::tests $arg
+ incr j
+ } elseif {$opt eq {--datasize}} {
+ set ::datasize $arg
+ incr j
+ } elseif {$opt eq {--requests}} {
+ set ::requests $arg
+ incr j
+ } else {
+ puts "Wrong argument: $opt"
+ exit 1
+ }
+}
+
+main
diff --git a/utils/srandmember/README.md b/utils/srandmember/README.md
new file mode 100644
index 0000000..d3da1e8
--- /dev/null
+++ b/utils/srandmember/README.md
@@ -0,0 +1,14 @@
+The utilities in this directory plot the distribution of SRANDMEMBER to
+evaluate how fair it is.
+
+See http://theshfl.com/redis_sets for more information on the topic that
+led to this investigation.
+
+showdist.rb -- shows the distribution of how frequently elements are returned.
+               The x axis is the number of times elements were returned, and
+               the y axis is how many elements were returned with that
+               frequency.
+
+showfreq.rb -- shows how frequently each element was returned.
+               The x axis is the element number.
+               The y axis is the number of times it was returned.
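+
+Both scripts assume a Redis server listening on localhost at the default
+port, and they use database 9 as scratch space (a 1000-element set named
+"myset" is created there). A typical session (sketch):
+
+    ruby showdist.rb              # prints an ASCII histogram to stdout
+    ruby showfreq.rb > freq.txt   # one "<element> <count>" line per element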
diff --git a/utils/srandmember/showdist.rb b/utils/srandmember/showdist.rb
new file mode 100644
index 0000000..2435857
--- /dev/null
+++ b/utils/srandmember/showdist.rb
@@ -0,0 +1,33 @@
+require 'redis'
+
+r = Redis.new
+r.select(9)
+r.del("myset");
+r.sadd("myset",(0..999).to_a)
+freq = {}
+100.times {
+ res = r.pipelined {
+ 1000.times {
+ r.srandmember("myset")
+ }
+ }
+ res.each{|ele|
+ freq[ele] = 0 if freq[ele] == nil
+ freq[ele] += 1
+ }
+}
+
+# Convert into frequency distribution
+dist = {}
+freq.each{|item,count|
+ dist[count] = 0 if dist[count] == nil
+ dist[count] += 1
+}
+
+min = dist.keys.min
+max = dist.keys.max
+(min..max).each{|x|
+ count = dist[x]
+ count = 0 if count == nil
+ puts "#{x} -> #{"*"*count}"
+}
diff --git a/utils/srandmember/showfreq.rb b/utils/srandmember/showfreq.rb
new file mode 100644
index 0000000..625519c
--- /dev/null
+++ b/utils/srandmember/showfreq.rb
@@ -0,0 +1,23 @@
+require 'redis'
+
+r = Redis.new
+r.select(9)
+r.del("myset");
+r.sadd("myset",(0..999).to_a)
+freq = {}
+500.times {
+ res = r.pipelined {
+ 1000.times {
+ r.srandmember("myset")
+ }
+ }
+ res.each{|ele|
+ freq[ele] = 0 if freq[ele] == nil
+ freq[ele] += 1
+ }
+}
+
+# Print the frequency at which each element was returned, so the output
+# can be processed with gnuplot
+freq.each{|item,count|
+ puts "#{item} #{count}"
+}
diff --git a/utils/systemd-redis_multiple_servers@.service b/utils/systemd-redis_multiple_servers@.service
new file mode 100644
index 0000000..108ccfc
--- /dev/null
+++ b/utils/systemd-redis_multiple_servers@.service
@@ -0,0 +1,37 @@
+# example systemd template service unit file for multiple redis-servers
+#
+# You can use this file as a blueprint for your actual template service unit
+# file, if you intend to run multiple independent redis-server instances in
+# parallel using systemd's "template unit files" feature. If you do, you will
+# want to choose a better basename for your service unit by renaming this file
+# when copying it.
+#
+# Please take a look at the provided "systemd-redis_server.service" example
+# service unit file, too, if you choose to use this approach at managing
+# multiple redis-server instances via systemd.
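+#
+# Example (a sketch, assuming you installed this file as
+# /etc/systemd/system/redis_server@.service):
+#
+#   systemctl enable --now redis_server@main
+#
+# would start an instance that reads /etc/redis/redis_server_main.conf.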
+
+[Unit]
+Description=Redis data structure server - instance %i
+Documentation=https://redis.io/documentation
+# This template unit assumes that your redis-server configuration file(s)
+# live at /etc/redis/redis_server_<INSTANCE_NAME>.conf
+AssertPathExists=/etc/redis/redis_server_%i.conf
+#Before=your_application.service another_example_application.service
+#AssertPathExists=/var/lib/redis
+
+[Service]
+ExecStart=/usr/local/bin/redis-server /etc/redis/redis_server_%i.conf
+LimitNOFILE=10032
+NoNewPrivileges=yes
+#OOMScoreAdjust=-900
+#PrivateTmp=yes
+Type=notify
+TimeoutStartSec=infinity
+TimeoutStopSec=infinity
+UMask=0077
+#User=redis
+#Group=redis
+#WorkingDirectory=/var/lib/redis
+
+[Install]
+WantedBy=multi-user.target
diff --git a/utils/systemd-redis_server.service b/utils/systemd-redis_server.service
new file mode 100644
index 0000000..cf15864
--- /dev/null
+++ b/utils/systemd-redis_server.service
@@ -0,0 +1,43 @@
+# example systemd service unit file for redis-server
+#
+# In order to use this as a template for providing a redis service in your
+# environment, _at the very least_ make sure to adapt the redis configuration
+# file you intend to use as needed (make sure to set "supervised systemd"), and
+# to set sane TimeoutStartSec and TimeoutStopSec property values in the unit's
+# "[Service]" section to fit your needs.
+#
+# Some properties, such as User= and Group=, are highly desirable for virtually
+# all deployments of redis, but cannot be provided in a manner that fits all
+# expectable environments. Some of these properties have been commented out in
+# this example service unit file, but you are highly encouraged to set them to
+# fit your needs.
+#
+# Please refer to systemd.unit(5), systemd.service(5), and systemd.exec(5) for
+# more information.
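+#
+# For reference, when loading a configuration file instead of the command
+# line options used below, the matching redis.conf settings would be:
+#
+#   supervised systemd
+#   daemonize no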
+
+[Unit]
+Description=Redis data structure server
+Documentation=https://redis.io/documentation
+#Before=your_application.service another_example_application.service
+#AssertPathExists=/var/lib/redis
+Wants=network-online.target
+After=network-online.target
+
+[Service]
+ExecStart=/usr/local/bin/redis-server --supervised systemd --daemonize no
+## Alternatively, have redis-server load a configuration file:
+#ExecStart=/usr/local/bin/redis-server /path/to/your/redis.conf
+LimitNOFILE=10032
+NoNewPrivileges=yes
+#OOMScoreAdjust=-900
+#PrivateTmp=yes
+Type=notify
+TimeoutStartSec=infinity
+TimeoutStopSec=infinity
+UMask=0077
+#User=redis
+#Group=redis
+#WorkingDirectory=/var/lib/redis
+
+[Install]
+WantedBy=multi-user.target
diff --git a/utils/tracking_collisions.c b/utils/tracking_collisions.c
new file mode 100644
index 0000000..f521111
--- /dev/null
+++ b/utils/tracking_collisions.c
@@ -0,0 +1,76 @@
+/* This is a small program used in order to understand the collision rate
+ * of CRC64 (ISO version) vs. other, stronger hashing functions in the
+ * context of hashing keys for the Redis "tracking" feature (client side
+ * caching assisted by the server).
+ *
+ * The program attempts to hash keys with common names in the form of
+ *
+ * prefix:<counter>
+ *
+ * and counts the resulting collisions in the 24 bits of output
+ * needed for the tracking feature invalidation table (16+ million entries).
+ *
+ * Compile with:
+ *
+ * cc -O2 ./tracking_collisions.c ../src/crc64.c ../src/sha1.c
+ * ./a.out
+ *
+ * --------------------------------------------------------------------------
+ *
+ * Copyright (C) 2019 Salvatore Sanfilippo
+ * This code is released under the BSD 2 clause license.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include "../src/crc64.h"
+#include "../src/sha1.h"
+
+#define TABLE_SIZE (1<<24)
+int Table[TABLE_SIZE];
+
+uint64_t crc64Hash(char *key, size_t len) {
+ return crc64(0,(unsigned char*)key,len);
+}
+
+uint64_t sha1Hash(char *key, size_t len) {
+ SHA1_CTX ctx;
+ unsigned char hash[20];
+
+ SHA1Init(&ctx);
+ SHA1Update(&ctx,(unsigned char*)key,len);
+ SHA1Final(hash,&ctx);
+ uint64_t hash64;
+ memcpy(&hash64,hash,sizeof(hash64));
+ return hash64;
+}
+
+/* Test the hashing function provided as callback and return the
+ * number of collisions found. */
+unsigned long testHashingFunction(uint64_t (*hash)(char *, size_t)) {
+ unsigned long collisions = 0;
+ memset(Table,0,sizeof(Table));
+ char *prefixes[] = {"object", "message", "user", NULL};
+ for (int i = 0; prefixes[i] != NULL; i++) {
+ for (int j = 0; j < TABLE_SIZE/2; j++) {
+ char keyname[128];
+ size_t keylen = snprintf(keyname,sizeof(keyname),"%s:%d",
+ prefixes[i],j);
+ uint64_t bucket = hash(keyname,keylen) % TABLE_SIZE;
+ if (Table[bucket]) {
+ collisions++;
+ } else {
+ Table[bucket] = 1;
+ }
+ }
+ }
+ return collisions;
+}
+
+int main(void) {
+ printf("SHA1 : %lu\n", testHashingFunction(sha1Hash));
+ printf("CRC64: %lu\n", testHashingFunction(crc64Hash));
+ return 0;
+}
diff --git a/utils/whatisdoing.sh b/utils/whatisdoing.sh
new file mode 100755
index 0000000..68d7f7c
--- /dev/null
+++ b/utils/whatisdoing.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+#
+# This script is from http://poormansprofiler.org/
+#
+# NOTE: Instead of using this script, you should use the Redis
+# Software Watchdog, which provides similar functionality in a more
+# reliable and easier to use way.
+#
+# Check https://redis.io/topics/latency for more information.
+
+nsamples=1
+sleeptime=0
+pid=$(ps auxww | grep '[r]edis-server' | awk '{print $2}')
+
+for x in $(seq 1 $nsamples)
+ do
+ gdb -ex "set pagination 0" -ex "thread apply all bt" -batch -p $pid
+ sleep $sleeptime
+ done | \
+awk '
+ BEGIN { s = ""; }
+ /Thread/ { print s; s = ""; }
+ /^\#/ { if (s != "" ) { s = s "," $4} else { s = $4 } }
+ END { print s }' | \
+sort | uniq -c | sort -r -n -k 1,1